From f1e76ff94570f4c44bee35608978575b0a1f4547 Mon Sep 17 00:00:00 2001
From: "david.bao"
Date: Fri, 22 Jul 2022 10:21:53 +0800
Subject: [PATCH] integrate buildah

Signed-off-by: david.bao
---
 .github/workflows/go.yml | 1 -
 CONTRIBUTING.md | 1 +
 apply/apply.go | 31 +-
 apply/driver/local.go | 51 +-
 apply/processor/create.go | 39 +-
 apply/processor/delete.go | 9 +-
 apply/processor/gen.go | 13 +-
 build/buildinstruction/utils.go | 98 +-
 cmd/sealer/cmd/build.go | 159 +-
 cmd/sealer/cmd/images.go | 105 +-
 cmd/sealer/cmd/inspect.go | 37 +-
 cmd/sealer/cmd/load.go | 21 +-
 cmd/sealer/cmd/login.go | 20 +-
 cmd/sealer/cmd/pull.go | 33 +-
 cmd/sealer/cmd/push.go | 19 +-
 cmd/sealer/cmd/rmi.go | 45 +-
 cmd/sealer/cmd/save.go | 73 +-
 cmd/sealer/cmd/search.go | 4 +
 cmd/sealer/main.go | 5 +
 docs/design/build_implementation.md | 15 +
 go.mod | 46 +-
 go.sum | 570 +-
 hack/build.sh | 10 +-
 pkg/auth/auth.go | 29 +
 pkg/filesystem/clusterimage/clusterimage.go | 38 +-
 pkg/filesystem/filesystem.go | 13 +-
 pkg/guest/guest.go | 49 +-
 pkg/image/distributionutil/login.go | 2 +-
 pkg/image/distributionutil/repository.go | 2 +-
 pkg/image/distributionutil/utils.go | 1 -
 pkg/imageengine/buildah/build.go | 370 +
 pkg/imageengine/buildah/build_rootfs.go | 50 +
 pkg/imageengine/buildah/commit.go | 123 +
 pkg/imageengine/buildah/common.go | 208 +
 pkg/imageengine/buildah/copy.go | 95 +
 pkg/imageengine/buildah/engine.go | 61 +
 pkg/imageengine/buildah/from.go | 262 +
 pkg/imageengine/buildah/get_annotation.go | 48 +
 pkg/imageengine/buildah/images.go | 300 +
 pkg/imageengine/buildah/init.go | 79 +
 pkg/imageengine/buildah/inspect.go | 91 +
 pkg/imageengine/buildah/load.go | 59 +
 pkg/imageengine/buildah/login.go | 52 +
 pkg/imageengine/buildah/logout.go | 43 +
 pkg/imageengine/buildah/manifest.go | 87 +
 pkg/imageengine/buildah/mount.go | 86 +
 pkg/imageengine/buildah/pull.go | 92 +
 pkg/imageengine/buildah/push.go | 124 +
 pkg/imageengine/buildah/rmi.go | 68 +
 pkg/imageengine/buildah/save.go | 48 +
 .../buildah/sealer_image_extension.go | 42 +
 pkg/imageengine/buildah/util.go | 56 +
 pkg/imageengine/common.go | 24 +
 pkg/imageengine/common/options.go | 157 +
 pkg/imageengine/interface.go | 57 +
 .../suites/build/fixtures/lite_build/Kubefile | 4 +-
 test/testhelper/settings/common.go | 4 +-
 types/api/v1/image_types.go | 11 +-
 types/api/v1/sealer_image_extension.go | 28 +
 utils/os/readers.go | 6 +-
 utils/platform/platform.go | 1 -
 vendor/github.com/Azure/go-ansiterm/go.mod | 5 +
 vendor/github.com/Azure/go-ansiterm/go.sum | 2 +
 .../Azure/go-ansiterm/winterm/ansi.go | 24 +-
 vendor/github.com/BurntSushi/toml/.gitignore | 5 +-
 vendor/github.com/BurntSushi/toml/.travis.yml | 15 -
 vendor/github.com/BurntSushi/toml/COMPATIBLE | 4 +-
 vendor/github.com/BurntSushi/toml/Makefile | 19 -
 vendor/github.com/BurntSushi/toml/README.md | 101 +-
 vendor/github.com/BurntSushi/toml/decode.go | 303 +-
 .../BurntSushi/toml/decode_go116.go | 19 +
 .../github.com/BurntSushi/toml/deprecated.go | 21 +
 vendor/github.com/BurntSushi/toml/doc.go | 28 +-
 vendor/github.com/BurntSushi/toml/encode.go | 476 +-
 .../BurntSushi/toml/encoding_types.go | 19 -
 .../BurntSushi/toml/encoding_types_1.1.go | 18 -
 vendor/github.com/BurntSushi/toml/error.go | 229 +
 vendor/github.com/BurntSushi/toml/go.mod | 3 +
 .../github.com/BurntSushi/toml/internal/tz.go | 36 +
 vendor/github.com/BurntSushi/toml/lex.go | 750 +-
 .../toml/{decode_meta.go => meta.go} | 117 +-
 vendor/github.com/BurntSushi/toml/parse.go | 659 +-
 vendor/github.com/BurntSushi/toml/session.vim | 1 -
 .../github.com/BurntSushi/toml/type_fields.go | 4 +-
 .../toml/{type_check.go => type_toml.go} | 23 +-
 .../github.com/Microsoft/go-winio/README.md | 29 +-
 .../Microsoft/go-winio/backuptar/noop.go | 4 +
 .../Microsoft/go-winio/backuptar/strconv.go | 68 +
 .../Microsoft/go-winio/backuptar/tar.go | 480 ++
 .../pkg/security/grantvmgroupaccess.go | 161 +
 .../go-winio/pkg/security/syscall_windows.go | 7 +
 .../go-winio/pkg/security/zsyscall_windows.go | 70 +
 .../Microsoft/go-winio/privilege.go | 5 +-
 .../github.com/Microsoft/go-winio/vhd/vhd.go | 323 +
 .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 +
 .../Microsoft/hcsshim/.gitattributes | 1 +
 .../github.com/Microsoft/hcsshim/.gitignore | 38 +
 .../Microsoft/hcsshim/.golangci.yml | 99 +
 .../github.com/Microsoft/hcsshim/CODEOWNERS | 1 +
 vendor/github.com/Microsoft/hcsshim/LICENSE | 21 +
 vendor/github.com/Microsoft/hcsshim/Makefile | 87 +
 .../Microsoft/hcsshim/Protobuild.toml | 49 +
 vendor/github.com/Microsoft/hcsshim/README.md | 120 +
 .../hcsshim/computestorage/attach.go | 38 +
 .../hcsshim/computestorage/destroy.go | 26 +
 .../hcsshim/computestorage/detach.go | 26 +
 .../hcsshim/computestorage/export.go | 46 +
 .../hcsshim/computestorage/format.go | 26 +
 .../hcsshim/computestorage/helpers.go | 193 +
 .../hcsshim/computestorage/import.go | 41 +
 .../hcsshim/computestorage/initialize.go | 38 +
 .../Microsoft/hcsshim/computestorage/mount.go | 27 +
 .../Microsoft/hcsshim/computestorage/setup.go | 74 +
 .../hcsshim/computestorage/storage.go | 50 +
 .../computestorage/zsyscall_windows.go | 319 +
 .../github.com/Microsoft/hcsshim/container.go | 223 +
 vendor/github.com/Microsoft/hcsshim/errors.go | 245 +
 .../Microsoft/hcsshim/functional_tests.ps1 | 12 +
 vendor/github.com/Microsoft/hcsshim/go.mod | 39 +
 vendor/github.com/Microsoft/hcsshim/go.sum | 993 +++
 .../github.com/Microsoft/hcsshim/hcsshim.go | 28 +
 .../Microsoft/hcsshim/hnsendpoint.go | 118 +
 .../Microsoft/hcsshim/hnsglobals.go | 16 +
 .../Microsoft/hcsshim/hnsnetwork.go | 36 +
 .../github.com/Microsoft/hcsshim/hnspolicy.go | 60 +
 .../Microsoft/hcsshim/hnspolicylist.go | 47 +
 .../Microsoft/hcsshim/hnssupport.go | 13 +
 .../github.com/Microsoft/hcsshim/interface.go | 114 +
 .../Microsoft/hcsshim/internal/cow/cow.go | 91 +
 .../hcsshim/internal/hcs/callback.go | 161 +
 .../Microsoft/hcsshim/internal/hcs/errors.go | 343 +
 .../Microsoft/hcsshim/internal/hcs/process.go | 557 ++
 .../hcsshim/internal/hcs/schema1/schema1.go | 250 +
 .../internal/hcs/schema2/attachment.go | 36 +
 .../hcsshim/internal/hcs/schema2/battery.go | 13 +
 .../hcs/schema2/cache_query_stats_response.go | 18 +
 .../hcsshim/internal/hcs/schema2/chipset.go | 27 +
 .../internal/hcs/schema2/close_handle.go | 14 +
 .../hcsshim/internal/hcs/schema2/com_port.go | 17 +
 .../internal/hcs/schema2/compute_system.go | 26 +
 .../internal/hcs/schema2/configuration.go | 72 +
 .../internal/hcs/schema2/console_size.go | 16 +
 .../hcsshim/internal/hcs/schema2/container.go | 36 +
 ...r_credential_guard_add_instance_request.go | 16 +
 ...edential_guard_hv_socket_service_config.go | 15 +
 .../container_credential_guard_instance.go | 16 +
 ...ainer_credential_guard_modify_operation.go | 17 +
 ...iner_credential_guard_operation_request.go | 15 +
 ...redential_guard_remove_instance_request.go | 14 +
 .../container_credential_guard_state.go | 25 +
 .../container_credential_guard_system_info.go | 14 +
 .../schema2/container_memory_information.go | 25 +
 .../hcsshim/internal/hcs/schema2/cpu_group.go | 15 +
 .../hcs/schema2/cpu_group_affinity.go | 15 +
 .../internal/hcs/schema2/cpu_group_config.go | 18 +
 .../hcs/schema2/cpu_group_configurations.go | 15 +
 .../hcs/schema2/cpu_group_operations.go | 18 +
 .../hcs/schema2/cpu_group_property.go | 15 +
 .../hcs/schema2/create_group_operation.go | 17 +
 .../hcs/schema2/delete_group_operation.go | 15 +
 .../hcsshim/internal/hcs/schema2/device.go | 27 +
 .../hcsshim/internal/hcs/schema2/devices.go | 46 +
 .../hcs/schema2/enhanced_mode_video.go | 14 +
 .../hcs/schema2/flexible_io_device.go | 18 +
 .../internal/hcs/schema2/guest_connection.go | 19 +
 .../hcs/schema2/guest_connection_info.go | 21 +
 .../hcs/schema2/guest_crash_reporting.go | 14 +
 .../hcsshim/internal/hcs/schema2/guest_os.go | 14 +
 .../internal/hcs/schema2/guest_state.go | 22 +
 .../schema2/host_processor_modify_request.go | 16 +
 .../internal/hcs/schema2/hosted_system.go | 16 +
 .../hcsshim/internal/hcs/schema2/hv_socket.go | 16 +
 .../internal/hcs/schema2/hv_socket_2.go | 15 +
 .../internal/hcs/schema2/hv_socket_address.go | 17 +
 .../hcs/schema2/hv_socket_service_config.go | 28 +
 .../hcs/schema2/hv_socket_system_config.go | 22 +
 .../hcs/schema2/interrupt_moderation_mode.go | 42 +
 .../internal/hcs/schema2/iov_settings.go | 22 +
 .../hcsshim/internal/hcs/schema2/keyboard.go | 13 +
 .../hcsshim/internal/hcs/schema2/layer.go | 21 +
 .../hcs/schema2/linux_kernel_direct.go | 18 +
 .../internal/hcs/schema2/logical_processor.go | 18 +
 .../internal/hcs/schema2/mapped_directory.go | 20 +
 .../internal/hcs/schema2/mapped_pipe.go | 18 +
 .../hcsshim/internal/hcs/schema2/memory.go | 14 +
 .../hcsshim/internal/hcs/schema2/memory_2.go | 49 +
 .../hcs/schema2/memory_information_for_vm.go | 18 +
 .../internal/hcs/schema2/memory_stats.go | 19 +
 .../model_container_definition_device.go | 14 +
 .../hcs/schema2/model_device_category.go | 15 +
 .../hcs/schema2/model_device_extension.go | 15 +
 .../hcs/schema2/model_device_instance.go | 17 +
 .../hcs/schema2/model_device_namespace.go | 16 +
 .../hcs/schema2/model_interface_class.go | 16 +
 .../internal/hcs/schema2/model_namespace.go | 15 +
 .../hcs/schema2/model_object_directory.go | 18 +
 .../hcs/schema2/model_object_namespace.go | 16 +
 .../hcs/schema2/model_object_symlink.go | 18 +
 .../hcs/schema2/modification_request.go | 15 +
 .../hcs/schema2/modify_setting_request.go | 20 +
 .../hcsshim/internal/hcs/schema2/mouse.go | 13 +
 .../internal/hcs/schema2/network_adapter.go | 17 +
 .../internal/hcs/schema2/networking.go | 23 +
 .../hcs/schema2/pause_notification.go | 15 +
 .../internal/hcs/schema2/pause_options.go | 17 +
 .../hcsshim/internal/hcs/schema2/plan9.go | 14 +
 .../internal/hcs/schema2/plan9_share.go | 34 +
 .../internal/hcs/schema2/process_details.go | 33 +
 .../hcs/schema2/process_modify_request.go | 19 +
 .../hcs/schema2/process_parameters.go | 46 +
 .../internal/hcs/schema2/process_status.go | 21 +
 .../hcsshim/internal/hcs/schema2/processor.go | 18 +
 .../internal/hcs/schema2/processor_2.go | 23 +
 .../internal/hcs/schema2/processor_stats.go | 19 +
 .../hcs/schema2/processor_topology.go | 15 +
 .../internal/hcs/schema2/properties.go | 54 +
 .../internal/hcs/schema2/property_query.go | 15 +
 .../internal/hcs/schema2/property_type.go | 26 +
 .../hcs/schema2/rdp_connection_options.go | 16 +
 .../internal/hcs/schema2/registry_changes.go | 16 +
 .../internal/hcs/schema2/registry_key.go | 18 +
 .../internal/hcs/schema2/registry_value.go | 30 +
 .../internal/hcs/schema2/restore_state.go | 19 +
 .../internal/hcs/schema2/save_options.go | 19 +
 .../hcsshim/internal/hcs/schema2/scsi.go | 16 +
 .../hcs/schema2/service_properties.go | 18 +
 .../schema2/shared_memory_configuration.go | 14 +
 .../hcs/schema2/shared_memory_region.go | 22 +
 .../hcs/schema2/shared_memory_region_info.go | 16 +
 .../internal/hcs/schema2/silo_properties.go | 17 +
 .../internal/hcs/schema2/statistics.go | 29 +
 .../hcsshim/internal/hcs/schema2/storage.go | 21 +
 .../internal/hcs/schema2/storage_qo_s.go | 16 +
 .../internal/hcs/schema2/storage_stats.go | 21 +
 .../hcsshim/internal/hcs/schema2/topology.go | 16 +
 .../hcsshim/internal/hcs/schema2/uefi.go | 20 +
 .../internal/hcs/schema2/uefi_boot_entry.go | 22 +
 .../hcsshim/internal/hcs/schema2/version.go | 16 +
 .../internal/hcs/schema2/video_monitor.go | 18 +
 .../internal/hcs/schema2/virtual_machine.go | 32 +
 .../internal/hcs/schema2/virtual_node_info.go | 20 +
 .../hcs/schema2/virtual_p_mem_controller.go | 20 +
 .../hcs/schema2/virtual_p_mem_device.go | 18 +
 .../hcs/schema2/virtual_p_mem_mapping.go | 15 +
 .../hcs/schema2/virtual_pci_device.go | 16 +
 .../hcs/schema2/virtual_pci_function.go | 18 +
 .../internal/hcs/schema2/virtual_smb.go | 16 +
 .../internal/hcs/schema2/virtual_smb_share.go | 20 +
 .../hcs/schema2/virtual_smb_share_options.go | 62 +
 .../hcsshim/internal/hcs/schema2/vm_memory.go | 26 +
 .../hcs/schema2/vm_processor_limits.go | 22 +
 .../hcs/schema2/windows_crash_reporting.go | 16 +
 .../Microsoft/hcsshim/internal/hcs/service.go | 49 +
 .../Microsoft/hcsshim/internal/hcs/system.go | 637 ++
 .../Microsoft/hcsshim/internal/hcs/utils.go | 62 +
 .../hcsshim/internal/hcs/waithelper.go | 68 +
 .../hcsshim/internal/hcserror/hcserror.go | 47 +
 .../Microsoft/hcsshim/internal/hns/hns.go | 23 +
 .../hcsshim/internal/hns/hnsendpoint.go | 338 +
 .../hcsshim/internal/hns/hnsfuncs.go | 49 +
 .../hcsshim/internal/hns/hnsglobals.go | 28 +
 .../hcsshim/internal/hns/hnsnetwork.go | 141 +
 .../hcsshim/internal/hns/hnspolicy.go | 109 +
 .../hcsshim/internal/hns/hnspolicylist.go | 201 +
 .../hcsshim/internal/hns/hnssupport.go | 49 +
 .../hcsshim/internal/hns/namespace.go | 111 +
 .../hcsshim/internal/hns/zsyscall_windows.go | 76 +
 .../hcsshim/internal/interop/interop.go | 23 +
 .../internal/interop/zsyscall_windows.go | 48 +
 .../Microsoft/hcsshim/internal/log/g.go | 23 +
 .../hcsshim/internal/logfields/fields.go | 32 +
 .../hcsshim/internal/longpath/longpath.go | 24 +
 .../hcsshim/internal/mergemaps/merge.go | 52 +
 .../Microsoft/hcsshim/internal/oc/exporter.go | 43 +
 .../Microsoft/hcsshim/internal/oc/span.go | 17 +
 .../hcsshim/internal/safefile/safeopen.go | 375 +
 .../hcsshim/internal/timeout/timeout.go | 74 +
 .../hcsshim/internal/vmcompute/vmcompute.go | 610 ++
 .../internal/vmcompute/zsyscall_windows.go | 581 ++
 .../hcsshim/internal/wclayer/activatelayer.go | 27 +
 .../hcsshim/internal/wclayer/baselayer.go | 182 +
 .../hcsshim/internal/wclayer/createlayer.go | 27 +
 .../internal/wclayer/createscratchlayer.go | 34 +
 .../internal/wclayer/deactivatelayer.go | 24 +
 .../hcsshim/internal/wclayer/destroylayer.go | 25 +
 .../internal/wclayer/expandscratchsize.go | 140 +
 .../hcsshim/internal/wclayer/exportlayer.go | 94 +
 .../internal/wclayer/getlayermountpath.go | 50 +
 .../internal/wclayer/getsharedbaseimages.go | 29 +
 .../hcsshim/internal/wclayer/grantvmaccess.go | 26 +
 .../hcsshim/internal/wclayer/importlayer.go | 166 +
 .../hcsshim/internal/wclayer/layerexists.go | 28 +
 .../hcsshim/internal/wclayer/layerid.go | 22 +
 .../hcsshim/internal/wclayer/layerutils.go | 97 +
 .../hcsshim/internal/wclayer/legacy.go | 811 +++
 .../hcsshim/internal/wclayer/nametoguid.go | 29 +
 .../hcsshim/internal/wclayer/preparelayer.go | 44 +
 .../hcsshim/internal/wclayer/processimage.go | 41 +
 .../internal/wclayer/unpreparelayer.go | 25 +
 .../hcsshim/internal/wclayer/wclayer.go | 35 +
 .../internal/wclayer/zsyscall_windows.go | 569 ++
 .../hcsshim/internal/winapi/console.go | 44 +
 .../hcsshim/internal/winapi/devices.go | 13 +
 .../hcsshim/internal/winapi/errors.go | 15 +
 .../hcsshim/internal/winapi/filesystem.go | 110 +
 .../Microsoft/hcsshim/internal/winapi/iocp.go | 3 +
 .../hcsshim/internal/winapi/jobobject.go | 215 +
 .../hcsshim/internal/winapi/logon.go | 30 +
 .../hcsshim/internal/winapi/memory.go | 4 +
 .../Microsoft/hcsshim/internal/winapi/net.go | 3 +
 .../Microsoft/hcsshim/internal/winapi/path.go | 11 +
 .../hcsshim/internal/winapi/process.go | 8 +
 .../hcsshim/internal/winapi/processor.go | 7 +
 .../hcsshim/internal/winapi/system.go | 52 +
 .../hcsshim/internal/winapi/thread.go | 12 +
 .../hcsshim/internal/winapi/utils.go | 80 +
 .../hcsshim/internal/winapi/winapi.go | 5 +
 .../internal/winapi/zsyscall_windows.go | 360 +
 vendor/github.com/Microsoft/hcsshim/layer.go | 107 +
 .../hcsshim/osversion/osversion_windows.go | 50 +
 .../hcsshim/osversion/windowsbuilds.go | 50 +
 .../github.com/Microsoft/hcsshim/process.go | 98 +
 .../Microsoft/hcsshim/zsyscall_windows.go | 54 +
 .../go-crypto/openpgp/key_generation.go | 9 +
 .../ProtonMail/go-crypto/openpgp/keys.go | 236 +-
 .../go-crypto/openpgp/keys_test_data.go | 122 +-
 .../go-crypto/openpgp/packet/packet.go | 1 +
 .../go-crypto/openpgp/packet/public_key.go | 8 +-
 .../go-crypto/openpgp/packet/signature.go | 35 +-
 .../ProtonMail/go-crypto/openpgp/read.go | 30 +-
 .../ProtonMail/go-crypto/openpgp/write.go | 11 +-
 vendor/github.com/VividCortex/ewma/.gitignore | 3 +
 .../github.com/VividCortex/ewma/.whitesource | 3 +
 vendor/github.com/VividCortex/ewma/LICENSE | 21 +
 vendor/github.com/VividCortex/ewma/README.md | 145 +
 .../github.com/VividCortex/ewma/codecov.yml | 6 +
 vendor/github.com/VividCortex/ewma/ewma.go | 126 +
 vendor/github.com/VividCortex/ewma/go.mod | 3 +
 .../stripansi}/LICENSE | 2 +-
 .../github.com/acarl005/stripansi/README.md | 30 +
 .../acarl005/stripansi/stripansi.go | 13 +
 vendor/github.com/blang/semver/.travis.yml | 21 +
 vendor/github.com/blang/semver/LICENSE | 22 +
 vendor/github.com/blang/semver/README.md | 194 +
 vendor/github.com/blang/semver/json.go | 23 +
 vendor/github.com/blang/semver/package.json | 17 +
 vendor/github.com/blang/semver/range.go | 416 ++
 vendor/github.com/blang/semver/semver.go | 418 ++
 vendor/github.com/blang/semver/sort.go | 28 +
 vendor/github.com/blang/semver/sql.go | 30 +
 .../github.com/cespare/xxhash/v2/.travis.yml | 8 -
 vendor/github.com/cespare/xxhash/v2/README.md | 6 +-
 vendor/github.com/cespare/xxhash/v2/xxhash.go | 1 -
 .../cespare/xxhash/v2/xxhash_amd64.s | 62 +-
 .../cespare/xxhash/v2/xxhash_unsafe.go | 53 +-
 vendor/github.com/chzyer/readline/.gitignore | 1 +
 vendor/github.com/chzyer/readline/.travis.yml | 8 +
 .../github.com/chzyer/readline/CHANGELOG.md | 58 +
 vendor/github.com/chzyer/readline/LICENSE | 22 +
 vendor/github.com/chzyer/readline/README.md | 114 +
 .../chzyer/readline/ansi_windows.go | 249 +
 vendor/github.com/chzyer/readline/complete.go | 285 +
 .../chzyer/readline/complete_helper.go | 165 +
 .../chzyer/readline/complete_segment.go | 82 +
 vendor/github.com/chzyer/readline/history.go | 330 +
 .../github.com/chzyer/readline/operation.go | 531 ++
 vendor/github.com/chzyer/readline/password.go | 33 +
 .../chzyer/readline/rawreader_windows.go | 125 +
 vendor/github.com/chzyer/readline/readline.go | 326 +
 vendor/github.com/chzyer/readline/remote.go | 475 ++
 vendor/github.com/chzyer/readline/runebuf.go | 629 ++
 vendor/github.com/chzyer/readline/runes.go | 223 +
 vendor/github.com/chzyer/readline/search.go | 164 +
 vendor/github.com/chzyer/readline/std.go | 197 +
 .../github.com/chzyer/readline/std_windows.go | 9 +
 vendor/github.com/chzyer/readline/term.go | 123 +
 vendor/github.com/chzyer/readline/term_bsd.go | 29 +
 .../github.com/chzyer/readline/term_linux.go | 33 +
 .../chzyer/readline/term_solaris.go | 32 +
 .../github.com/chzyer/readline/term_unix.go | 24 +
 .../chzyer/readline/term_windows.go | 171 +
 vendor/github.com/chzyer/readline/terminal.go | 238 +
 vendor/github.com/chzyer/readline/utils.go | 277 +
 .../github.com/chzyer/readline/utils_unix.go | 83 +
 .../chzyer/readline/utils_windows.go | 41 +
 vendor/github.com/chzyer/readline/vim.go | 176 +
 .../github.com/chzyer/readline/windows_api.go | 152 +
 vendor/github.com/containerd/cgroups/LICENSE | 201 +
 .../containerd/cgroups/stats/v1/doc.go | 17 +
 .../containerd/cgroups/stats/v1/metrics.pb.go | 6125 +++++++++++++++++
 .../cgroups/stats/v1/metrics.pb.txt | 790 +++
 .../containerd/cgroups/stats/v1/metrics.proto | 158 +
 .../containerd/containerd/log/context.go | 68 +
 .../containerd/pkg/userns/userns_linux.go | 62 +
 .../pkg/userns/userns_unsupported.go | 25 +
 .../containerd/platforms/compare.go | 193 +
 .../containerd/platforms/cpuinfo.go | 131 +
 .../containerd/platforms/database.go | 114 +
 .../containerd/platforms/defaults.go | 43 +
 .../containerd/platforms/defaults_unix.go | 24 +
 .../containerd/platforms/defaults_windows.go | 81 +
 .../containerd/platforms/platforms.go | 278 +
 .../containerd/containerd/sys/epoll.go | 33 +
 .../containerd/containerd/sys/fds.go | 34 +
 .../containerd/containerd/sys/filesys_unix.go | 31 +
 .../containerd/sys/filesys_windows.go | 330 +
 .../containerd/containerd/sys/mount_linux.go | 145 +
 .../containerd/containerd/sys/oom_linux.go | 83 +
 .../containerd/sys/oom_unsupported.go | 48 +
 .../containerd/containerd/sys/socket_unix.go | 80 +
 .../containerd/sys/socket_windows.go | 32 +
 .../containerd/containerd/sys/stat_bsd.go | 44 +
 .../containerd/containerd/sys/stat_openbsd.go | 45 +
 .../containerd/containerd/sys/stat_unix.go | 44 +
 .../containerd/sys/subprocess_unsafe_linux.go | 30 +
 .../containerd/sys/subprocess_unsafe_linux.s | 15 +
 .../containerd/sys/userns_deprecated.go | 23 +
 .../stargz-snapshotter/estargz/LICENSE | 202 +
 .../stargz-snapshotter/estargz/build.go | 628 ++
 .../estargz/errorutil/errors.go | 40 +
 .../stargz-snapshotter/estargz/estargz.go | 1044 +++
 .../stargz-snapshotter/estargz/go.mod | 11 +
 .../stargz-snapshotter/estargz/go.sum | 22 +
 .../stargz-snapshotter/estargz/gzip.go | 238 +
 .../stargz-snapshotter/estargz/testutil.go | 2009 ++++++
 .../stargz-snapshotter/estargz/types.go | 316 +
 .../containernetworking/cni/LICENSE | 202 +
 .../containernetworking/cni/libcni/api.go | 679 ++
 .../containernetworking/cni/libcni/conf.go | 268 +
 .../cni/pkg/invoke/args.go | 128 +
 .../cni/pkg/invoke/delegate.go | 80 +
 .../cni/pkg/invoke/exec.go | 138 +
 .../cni/pkg/invoke/find.go | 48 +
 .../cni/pkg/invoke/os_unix.go | 20 +
 .../cni/pkg/invoke/os_windows.go | 18 +
 .../cni/pkg/invoke/raw_exec.go | 88 +
 .../cni/pkg/types/020/types.go | 189 +
 .../cni/pkg/types/040/types.go | 306 +
 .../cni/pkg/types/100/types.go | 307 +
 .../containernetworking/cni/pkg/types/args.go | 122 +
 .../cni/pkg/types/create/create.go | 56 +
 .../cni/pkg/types/internal/convert.go | 92 +
 .../cni/pkg/types/internal/create.go | 66 +
 .../cni/pkg/types/types.go | 234 +
 .../cni/pkg/utils/utils.go | 84 +
 .../cni/pkg/version/conf.go | 26 +
 .../cni/pkg/version/plugin.go | 144 +
 .../cni/pkg/version/reconcile.go | 49 +
 .../cni/pkg/version/version.go | 89 +
 .../containernetworking/plugins/LICENSE | 201 +
 .../plugins/pkg/ns/README.md | 41 +
 .../plugins/pkg/ns/ns_linux.go | 234 +
 .../github.com/containers/buildah/.cirrus.yml | 294 +
 .../github.com/containers/buildah/.gitignore | 12 +
 .../containers/buildah/.golangci.yml | 13 +
 .../containers/buildah/CHANGELOG.md | 2390 +++
 .../containers/buildah/CODE-OF-CONDUCT.md | 3 +
 .../containers/buildah/CONTRIBUTING.md | 174 +
 vendor/github.com/containers/buildah/LICENSE | 201 +
 .../github.com/containers/buildah/MAINTAINERS | 4 +
 vendor/github.com/containers/buildah/Makefile | 190 +
 vendor/github.com/containers/buildah/OWNERS | 28 +
 .../github.com/containers/buildah/README.md | 132 +
 .../github.com/containers/buildah/SECURITY.md | 3 +
 vendor/github.com/containers/buildah/add.go | 650 ++
 .../containers/buildah/bind/mount.go | 299 +
 .../buildah/bind/mount_unsupported.go | 13 +
 .../containers/buildah/bind/util.go | 27 +
 .../containers/buildah/btrfs_installed_tag.sh | 7 +
 .../containers/buildah/btrfs_tag.sh | 7 +
 .../github.com/containers/buildah/buildah.go | 526 ++
 .../containers/buildah/changelog.txt | 2300 +++
 .../containers/buildah/chroot/run.go | 1421 ++++
 .../containers/buildah/chroot/seccomp.go | 200 +
 .../buildah/chroot/seccomp_unsupported.go | 23 +
 .../containers/buildah/chroot/selinux.go | 22 +
 .../buildah/chroot/selinux_unsupported.go | 18 +
 .../containers/buildah/chroot/unsupported.go | 15 +
 .../github.com/containers/buildah/commit.go | 416 ++
 .../github.com/containers/buildah/common.go | 88 +
 .../github.com/containers/buildah/config.go | 680 +
 .../containers/buildah/copier/copier.go | 1897 +++
 .../containers/buildah/copier/syscall_unix.go | 95 +
 .../buildah/copier/syscall_windows.go | 88 +
 .../containers/buildah/copier/unwrap_112.go | 11 +
 .../containers/buildah/copier/unwrap_113.go | 18 +
 .../containers/buildah/copier/xattrs.go | 97 +
 .../buildah/copier/xattrs_unsupported.go | 15 +
 .../containers/buildah/define/build.go | 254 +
 .../containers/buildah/define/isolation.go | 32 +
 .../containers/buildah/define/namespace.go | 87 +
 .../containers/buildah/define/pull.go | 50 +
 .../containers/buildah/define/types.go | 224 +
 .../containers/buildah/define/types_unix.go | 9 +
 .../buildah/define/types_unsupported.go | 6 +
 .../github.com/containers/buildah/delete.go | 17 +
 .../containers/buildah/developmentplan.md | 13 +
 .../github.com/containers/buildah/digester.go | 269 +
 .../containers/buildah/docker/AUTHORS | 1788 +++
 .../containers/buildah/docker/types.go | 258 +
 vendor/github.com/containers/buildah/go.mod | 48 +
 vendor/github.com/containers/buildah/go.sum | 1613 +++
 vendor/github.com/containers/buildah/image.go | 815 +++
 .../containers/buildah/imagebuildah/build.go | 651 ++
 .../buildah/imagebuildah/executor.go | 808 +++
 .../buildah/imagebuildah/stage_executor.go | 1572 +++++
 .../containers/buildah/imagebuildah/util.go | 12 +
 .../github.com/containers/buildah/import.go | 174 +
 vendor/github.com/containers/buildah/info.go | 222 +
 .../github.com/containers/buildah/install.md | 481 ++
 .../buildah/internal/parse/parse.go | 408 ++
 .../containers/buildah/internal/types.go | 11 +
 .../containers/buildah/internal/util/util.go | 24 +
 .../containers/buildah/libdm_tag.sh | 15 +
 vendor/github.com/containers/buildah/mount.go | 53 +
 vendor/github.com/containers/buildah/new.go | 338 +
 .../buildah/pkg/blobcache/blobcache.go | 568 ++
 .../containers/buildah/pkg/chrootuser/user.go | 116 +
 .../buildah/pkg/chrootuser/user_basic.go | 31 +
 .../buildah/pkg/chrootuser/user_linux.go | 297 +
 .../containers/buildah/pkg/cli/common.go | 438 ++
 .../containers/buildah/pkg/cli/exec_codes.go | 13 +
 .../buildah/pkg/completion/completion.go | 23 +
 .../containers/buildah/pkg/formats/formats.go | 167 +
 .../buildah/pkg/formats/templates.go | 78 +
 .../containers/buildah/pkg/overlay/overlay.go | 311 +
 .../containers/buildah/pkg/parse/parse.go | 1076 +++
 .../buildah/pkg/parse/parse_unix.go | 50 +
 .../buildah/pkg/parse/parse_unsupported.go | 16 +
 .../containers/buildah/pkg/rusage/rusage.go | 48 +
 .../buildah/pkg/rusage/rusage_unix.go | 35 +
 .../buildah/pkg/rusage/rusage_unsupported.go | 18 +
 .../buildah/pkg/sshagent/sshagent.go | 231 +
 .../containers/buildah/pkg/util/util.go | 81 +
 vendor/github.com/containers/buildah/pull.go | 96 +
 vendor/github.com/containers/buildah/push.go | 135 +
 .../github.com/containers/buildah/release.sh | 107 +
 vendor/github.com/containers/buildah/run.go | 176 +
 .../containers/buildah/run_linux.go | 2871 ++++
 .../github.com/containers/buildah/run_unix.go | 31 +
 .../containers/buildah/run_unsupported.go | 27 +
 .../github.com/containers/buildah/seccomp.go | 35 +
 .../containers/buildah/seccomp_unsupported.go | 15 +
 .../github.com/containers/buildah/selinux.go | 41 +
 .../containers/buildah/selinux_tag.sh | 4 +
 .../containers/buildah/selinux_unsupported.go | 18 +
 .../containers/buildah/troubleshooting.md | 158 +
 .../github.com/containers/buildah/unmount.go | 19 +
 vendor/github.com/containers/buildah/util.go | 227 +
 .../containers/buildah/util/types.go | 20 +
 .../containers/buildah/util/util.go | 485 ++
 .../containers/buildah/util/util_linux.go | 20 +
 .../buildah/util/util_not_uint64.go | 14 +
 .../containers/buildah/util/util_uint64.go | 14 +
 .../containers/buildah/util/util_unix.go | 39 +
 .../buildah/util/util_unsupported.go | 8 +
 .../containers/buildah/util/util_windows.go | 24 +
 vendor/github.com/containers/common/LICENSE | 201 +
 .../containers/common/libimage/copier.go | 441 ++
 .../containers/common/libimage/disk_usage.go | 130 +
 .../containers/common/libimage/events.go | 62 +
 .../containers/common/libimage/filters.go | 382 +
 .../containers/common/libimage/history.go | 80 +
 .../containers/common/libimage/image.go | 949 +++
 .../common/libimage/image_config.go | 241 +
 .../containers/common/libimage/image_tree.go | 117 +
 .../containers/common/libimage/import.go | 127 +
 .../containers/common/libimage/inspect.go | 233 +
 .../containers/common/libimage/layer_tree.go | 299 +
 .../containers/common/libimage/load.go | 133 +
 .../common/libimage/manifest_list.go | 401 ++
 .../common/libimage/manifests/copy.go | 15 +
 .../common/libimage/manifests/manifests.go | 430 ++
 .../containers/common/libimage/normalize.go | 181 +
 .../containers/common/libimage/oci.go | 97 +
 .../containers/common/libimage/pull.go | 634 ++
 .../containers/common/libimage/push.go | 89 +
 .../containers/common/libimage/runtime.go | 739 ++
 .../containers/common/libimage/save.go | 227 +
 .../containers/common/libimage/search.go | 324 +
 .../common/libnetwork/cni/README.md | 10 +
 .../common/libnetwork/cni/cni.coverprofile | 483 ++
 .../common/libnetwork/cni/cni_conversion.go | 397 ++
 .../common/libnetwork/cni/cni_exec.go | 110 +
 .../common/libnetwork/cni/cni_types.go | 281 +
 .../common/libnetwork/cni/config.go | 208 +
 .../common/libnetwork/cni/network.go | 275 +
 .../containers/common/libnetwork/cni/run.go | 274 +
 .../common/libnetwork/internal/util/bridge.go | 68 +
 .../common/libnetwork/internal/util/create.go | 41 +
 .../libnetwork/internal/util/interface.go | 19 +
 .../libnetwork/internal/util/interfaces.go | 34 +
 .../common/libnetwork/internal/util/ip.go | 70 +
 .../common/libnetwork/internal/util/parse.go | 37 +
 .../common/libnetwork/internal/util/util.go | 123 +
 .../libnetwork/internal/util/validate.go | 124 +
 .../common/libnetwork/netavark/config.go | 249 +
 .../common/libnetwork/netavark/const.go | 5 +
 .../common/libnetwork/netavark/exec.go | 161 +
 .../common/libnetwork/netavark/ipam.go | 372 +
 .../common/libnetwork/netavark/network.go | 309 +
 .../common/libnetwork/netavark/run.go | 132 +
 .../common/libnetwork/network/interface.go | 203 +
 .../common/libnetwork/types/const.go | 47 +
 .../common/libnetwork/types/define.go | 25 +
 .../common/libnetwork/types/network.go | 282 +
 .../common/libnetwork/util/filters.go | 80 +
 .../containers/common/libnetwork/util/ip.go | 56 +
 .../common/libnetwork/util/ip_calc.go | 53 +
 .../common/pkg/apparmor/apparmor.go | 22 +
 .../common/pkg/apparmor/apparmor_linux.go | 301 +
 .../pkg/apparmor/apparmor_linux_template.go | 49 +
 .../pkg/apparmor/apparmor_unsupported.go | 31 +
 .../apparmor/internal/supported/supported.go | 113 +
 .../containers/common/pkg/auth/auth.go | 333 +
 .../containers/common/pkg/auth/cli.go | 80 +
 .../common/pkg/capabilities/capabilities.go | 209 +
 .../containers/common/pkg/cgroups/blkio.go | 149 +
 .../containers/common/pkg/cgroups/cgroups.go | 671 ++
 .../common/pkg/cgroups/cgroups_supported.go | 129 +
 .../common/pkg/cgroups/cgroups_unsupported.go | 22 +
 .../containers/common/pkg/cgroups/cpu.go | 160 +
 .../containers/common/pkg/cgroups/cpuset.go | 85 +
 .../containers/common/pkg/cgroups/memory.go | 66 +
 .../containers/common/pkg/cgroups/pids.go | 69 +
 .../containers/common/pkg/cgroups/systemd.go | 80 +
 .../common/pkg/cgroupv2/cgroups_linux.go | 27 +
 .../pkg/cgroupv2/cgroups_unsupported.go | 8 +
 .../containers/common/pkg/chown/chown.go | 65 +
 .../containers/common/pkg/chown/chown_unix.go | 66 +
 .../common/pkg/chown/chown_windows.go | 11 +
 .../common/pkg/completion/completion.go | 155 +
 .../containers/common/pkg/config/config.go | 1228 ++++
 .../common/pkg/config/config_darwin.go | 30 +
 .../common/pkg/config/config_linux.go | 44 +
 .../common/pkg/config/config_local.go | 115 +
 .../common/pkg/config/config_remote.go | 33 +
 .../common/pkg/config/config_unsupported.go | 7 +
 .../common/pkg/config/config_windows.go | 19 +
 .../common/pkg/config/containers.conf | 607 ++
 .../containers/common/pkg/config/default.go | 567 +
 .../common/pkg/config/default_linux.go | 50 +
 .../common/pkg/config/default_unsupported.go | 24 +
 .../common/pkg/config/default_windows.go | 22 +
 .../containers/common/pkg/config/nosystemd.go | 28 +
 .../common/pkg/config/pull_policy.go | 95 +
 .../containers/common/pkg/config/systemd.go | 87 +
 .../common/pkg/download/download.go | 31 +
 .../containers/common/pkg/filters/filters.go | 118 +
 .../containers/common/pkg/manifests/errors.go | 16 +
 .../common/pkg/manifests/manifests.go | 495 ++
 .../containers/common/pkg/parse/parse.go | 182 +
 .../containers/common/pkg/parse/parse_unix.go | 50 +
 .../containers/common/pkg/retry/retry.go | 108 +
 .../common/pkg/retry/retry_linux.go | 9 +
 .../common/pkg/retry/retry_unsupported.go | 7 +
 .../common/pkg/seccomp/conversion.go | 197 +
 .../common/pkg/seccomp/default_linux.go | 886 +++
 .../common/pkg/seccomp/errno_list.go | 93 +
 .../containers/common/pkg/seccomp/filter.go | 237 +
 .../common/pkg/seccomp/seccomp.json | 1041 +++
 .../common/pkg/seccomp/seccomp_linux.go | 218 +
 .../common/pkg/seccomp/seccomp_unsupported.go | 46 +
 .../common/pkg/seccomp/supported.go | 49 +
 .../containers/common/pkg/seccomp/types.go | 117 +
 .../containers/common/pkg/seccomp/validate.go | 29 +
 .../common/pkg/signal/signal_common.go | 41 +
 .../common/pkg/signal/signal_linux.go | 108 +
 .../common/pkg/signal/signal_linux_mipsx.go | 108 +
 .../common/pkg/signal/signal_unsupported.go | 99 +
 .../common/pkg/subscriptions/mounts.conf | 1 +
 .../common/pkg/subscriptions/subscriptions.go | 386 ++
 .../common/pkg/supplemented/errors.go | 17 +
 .../common/pkg/supplemented/supplemented.go | 402 ++
 .../common/pkg/timetype/timestamp.go | 131 +
 .../containers/common/pkg/umask/umask_unix.go | 20 +
 .../common/pkg/umask/umask_unsupported.go | 7 +
 .../containers/common/pkg/util/util.go | 24 +
 .../common/pkg/util/util_supported.go | 80 +
 .../common/pkg/util/util_windows.go | 12 +
 .../containers/common/version/version.go | 4 +
 vendor/github.com/containers/image/v5/LICENSE | 189 +
 .../containers/image/v5/copy/copy.go | 1782 +++
 .../image/v5/copy/digesting_reader.go | 62 +
 .../containers/image/v5/copy/encrypt.go | 24 +
 .../containers/image/v5/copy/manifest.go | 170 +
 .../image/v5/copy/progress_reader.go | 104 +
 .../containers/image/v5/copy/sign.go | 31 +
 .../image/v5/directory/directory_dest.go | 295 +
 .../image/v5/directory/directory_src.go | 96 +
 .../image/v5/directory/directory_transport.go | 189 +
 .../v5/directory/explicitfilepath/path.go | 56 +
 .../image/v5/docker/archive/dest.go | 81 +
 .../image/v5/docker/archive/reader.go | 120 +
 .../containers/image/v5/docker/archive/src.go | 42 +
 .../image/v5/docker/archive/transport.go | 211 +
 .../image/v5/docker/archive/writer.go | 82 +
 .../containers/image/v5/docker/cache.go | 23 +
 .../image/v5/docker/daemon/client.go | 85 +
 .../image/v5/docker/daemon/daemon_dest.go | 154 +
 .../image/v5/docker/daemon/daemon_src.go | 53 +
 .../v5/docker/daemon/daemon_transport.go | 223 +
 .../image/v5/docker/docker_client.go | 820 +++
 .../image/v5/docker/docker_image.go | 153 +
 .../image/v5/docker/docker_image_dest.go | 706 ++
 .../image/v5/docker/docker_image_src.go | 765 ++
 .../image/v5/docker/docker_transport.go | 168 +
 .../containers/image/v5/docker/errors.go | 61 +
 .../image/v5/docker/internal/tarfile/dest.go | 200 +
 .../v5/docker/internal/tarfile/reader.go | 269 +
 .../image/v5/docker/internal/tarfile/src.go | 331 +
 .../image/v5/docker/internal/tarfile/types.go | 28 +
 .../v5/docker/internal/tarfile/writer.go | 381 +
 .../containers/image/v5/docker/lookaside.go | 236 +
 .../v5/docker/policyconfiguration/naming.go | 77 +
 .../image/v5/docker/reference/README.md | 2 +
 .../image/v5/docker/reference/helpers.go | 42 +
 .../image/v5/docker/reference/normalize.go | 181 +
 .../image/v5/docker/reference/reference.go | 433 ++
 .../image/v5/docker/reference/regexp.go | 143 +
 .../image/v5/docker/wwwauthenticate.go | 159 +
 .../containers/image/v5/image/docker_list.go | 34 +
 .../image/v5/image/docker_schema1.go | 248 +
 .../image/v5/image/docker_schema2.go | 400 ++
 .../containers/image/v5/image/manifest.go | 113 +
 .../containers/image/v5/image/memory.go | 64 +
 .../containers/image/v5/image/oci.go | 248 +
 .../containers/image/v5/image/oci_index.go | 34 +
 .../containers/image/v5/image/sourced.go | 104 +
 .../containers/image/v5/image/unparsed.go | 95 +
 .../internal/blobinfocache/blobinfocache.go | 64 +
 .../image/v5/internal/blobinfocache/types.go | 45 +
 .../image/v5/internal/iolimits/iolimits.go | 60 +
 .../internal/pkg/platform/platform_matcher.go | 196 +
 .../internal/putblobdigest/put_blob_digest.go | 57 +
 .../image/v5/internal/rootless/rootless.go | 25 +
 .../v5/internal/streamdigest/stream_digest.go | 41 +
 .../image/v5/internal/tmpdir/tmpdir.go | 34 +
 .../image/v5/internal/types/types.go | 91 +
 .../v5/internal/uploadreader/upload_reader.go | 61 +
 .../containers/image/v5/manifest/common.go | 211 +
 .../image/v5/manifest/docker_schema1.go | 320 +
 .../image/v5/manifest/docker_schema2.go | 297 +
 .../image/v5/manifest/docker_schema2_list.go | 221 +
 .../containers/image/v5/manifest/list.go | 80 +
 .../containers/image/v5/manifest/manifest.go | 270 +
 .../containers/image/v5/manifest/oci.go | 220 +
 .../containers/image/v5/manifest/oci_index.go | 233 +
 .../image/v5/oci/archive/oci_dest.go | 168 +
 .../image/v5/oci/archive/oci_src.go | 122 +
 .../image/v5/oci/archive/oci_transport.go | 198 +
 .../image/v5/oci/internal/oci_util.go | 126 +
 .../image/v5/oci/layout/oci_dest.go | 347 +
 .../containers/image/v5/oci/layout/oci_src.go | 209 +
 .../image/v5/oci/layout/oci_transport.go | 264 +
 .../image/v5/openshift/openshift-copies.go | 1194 ++++
 .../image/v5/openshift/openshift.go | 584 ++
 .../image/v5/openshift/openshift_transport.go | 157 +
 .../containers/image/v5/ostree/ostree_dest.go | 519 ++
 .../containers/image/v5/ostree/ostree_src.go | 431 ++
 .../image/v5/ostree/ostree_transport.go | 253 +
 .../v5/pkg/blobinfocache/boltdb/boltdb.go | 393 ++
 .../image/v5/pkg/blobinfocache/default.go | 66 +
 .../internal/prioritize/prioritize.go | 110 +
 .../v5/pkg/blobinfocache/memory/memory.go | 186 +
 .../image/v5/pkg/blobinfocache/none/none.go | 50 +
 .../image/v5/pkg/compression/compression.go | 167 +
 .../v5/pkg/compression/internal/types.go | 65 +
 .../image/v5/pkg/compression/types/types.go | 41 +
 .../image/v5/pkg/compression/zstd.go | 59 +
 .../image/v5/pkg/docker/config/config.go | 802 +++
 .../image/v5/pkg/shortnames/shortnames.go | 477 +
 .../image/v5/pkg/strslice/README.md | 1 +
 .../image/v5/pkg/strslice/strslice.go | 30 +
 .../v5/pkg/sysregistriesv2/shortnames.go | 347 +
 .../sysregistriesv2/system_registries_v2.go | 1000 +++
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 111 +
 .../containers/image/v5/sif/load.go | 211 +
 .../github.com/containers/image/v5/sif/src.go | 217 +
 .../containers/image/v5/sif/transport.go | 164 +
 .../containers/image/v5/signature/docker.go | 89 +
 .../containers/image/v5/signature/json.go | 88 +
 .../image/v5/signature/mechanism.go | 96 +
 .../image/v5/signature/mechanism_gpgme.go | 203 +
 .../image/v5/signature/mechanism_openpgp.go | 172 +
 .../image/v5/signature/policy_config.go | 777 +++
 .../image/v5/signature/policy_eval.go | 288 +
 .../v5/signature/policy_eval_baselayer.go | 20 +
 .../v5/signature/policy_eval_signedby.go | 130 +
 .../image/v5/signature/policy_eval_simple.go | 29 +
 .../v5/signature/policy_reference_match.go | 154 +
 .../image/v5/signature/policy_types.go | 163 +
 .../image/v5/signature/signature.go | 287 +
 .../image/v5/storage/storage_image.go | 1347 ++++
 .../image/v5/storage/storage_reference.go | 288 +
 .../image/v5/storage/storage_transport.go | 388 ++
 .../containers/image/v5/tarball/doc.go | 60 +
 .../image/v5/tarball/tarball_reference.go | 93 +
 .../image/v5/tarball/tarball_src.go | 274 +
 .../image/v5/tarball/tarball_transport.go | 75 +
 .../transports/alltransports/alltransports.go | 48 +
 .../transports/alltransports/docker_daemon.go | 9 +
 .../alltransports/docker_daemon_stub.go | 10 +
 .../v5/transports/alltransports/ostree.go | 9 +
 .../transports/alltransports/ostree_stub.go | 10 +
 .../v5/transports/alltransports/storage.go | 9 +
 .../transports/alltransports/storage_stub.go | 10 +
 .../containers/image/v5/transports/stub.go | 36 +
 .../image/v5/transports/transports.go | 90 +
 .../containers/image/v5/types/types.go | 694 +
 .../containers/image/v5/version/version.go | 18 +
 .../containers/libtrust/CONTRIBUTING.md | 13 +
 vendor/github.com/containers/libtrust/LICENSE | 191 +
 .../containers/libtrust/MAINTAINERS | 3 +
 .../github.com/containers/libtrust/README.md | 22 +
 .../containers/libtrust/certificates.go | 175 +
 vendor/github.com/containers/libtrust/doc.go | 9 +
 .../github.com/containers/libtrust/ec_key.go | 422 ++
 .../containers/libtrust/ec_key_no_openssl.go | 23 +
 .../containers/libtrust/ec_key_openssl.go | 24 +
 .../github.com/containers/libtrust/filter.go | 50 +
 vendor/github.com/containers/libtrust/hash.go | 56 +
 .../containers/libtrust/jsonsign.go | 657 ++
 vendor/github.com/containers/libtrust/key.go | 253 +
 .../containers/libtrust/key_files.go | 255 +
 .../containers/libtrust/key_manager.go | 175 +
 .../github.com/containers/libtrust/rsa_key.go | 427 ++
 vendor/github.com/containers/libtrust/util.go | 363 +
 .../containers/ocicrypt/.travis.yml | 29 +
 .../containers/ocicrypt/ADOPTERS.md | 10 +
 .../containers/ocicrypt/CODE-OF-CONDUCT.md | 3 +
 vendor/github.com/containers/ocicrypt/LICENSE | 189 +
 .../containers/ocicrypt/MAINTAINERS | 6 +
 .../github.com/containers/ocicrypt/Makefile | 34 +
 .../github.com/containers/ocicrypt/README.md | 50 +
 .../containers/ocicrypt/SECURITY.md | 3 +
 .../ocicrypt/blockcipher/blockcipher.go | 160 +
 .../blockcipher/blockcipher_aes_ctr.go | 193 +
 .../containers/ocicrypt/config/config.go | 114 +
 .../ocicrypt/config/constructors.go | 245 +
 .../config/keyprovider-config/config.go | 81 +
 .../ocicrypt/config/pkcs11config/config.go | 124 +
 .../ocicrypt/crypto/pkcs11/common.go | 134 +
 .../ocicrypt/crypto/pkcs11/pkcs11helpers.go | 485 ++
 .../crypto/pkcs11/pkcs11helpers_nocgo.go | 31 +
 .../ocicrypt/crypto/pkcs11/utils.go | 114 +
 .../containers/ocicrypt/encryption.go | 350 +
 vendor/github.com/containers/ocicrypt/go.mod | 21 +
 vendor/github.com/containers/ocicrypt/go.sum | 117 +
 vendor/github.com/containers/ocicrypt/gpg.go | 425 ++
 .../containers/ocicrypt/gpgvault.go | 100 +
 .../ocicrypt/helpers/parse_helpers.go | 380 +
 .../ocicrypt/keywrap/jwe/keywrapper_jwe.go | 136 +
 .../keywrap/keyprovider/keyprovider.go | 242 +
 .../containers/ocicrypt/keywrap/keywrap.go | 48 +
 .../ocicrypt/keywrap/pgp/keywrapper_gpg.go | 273 +
 .../keywrap/pkcs11/keywrapper_pkcs11.go | 147 +
 .../keywrap/pkcs7/keywrapper_pkcs7.go | 136 +
 .../github.com/containers/ocicrypt/reader.go | 40 +
 .../containers/ocicrypt/spec/spec.go | 12 +
 .../ocicrypt/utils/delayedreader.go | 109 +
 .../containers/ocicrypt/utils/ioutils.go | 56 +
 .../utils/keyprovider/keyprovider.pb.go | 243 +
 .../utils/keyprovider/keyprovider.proto | 17 +
 .../containers/ocicrypt/utils/testing.go | 166 +
 .../containers/ocicrypt/utils/utils.go | 250 +
 .../github.com/containers/storage/.cirrus.yml | 177 +
 .../containers/storage/.dockerignore | 3 +
 .../github.com/containers/storage/.gitignore | 32 +
 .../containers/storage/.golangci.yml | 37 +
 vendor/github.com/containers/storage/.mailmap | 254 +
 vendor/github.com/containers/storage/AUTHORS | 1523 ++++
 .../containers/storage/CODE-OF-CONDUCT.md | 3 +
 .../containers/storage/CONTRIBUTING.md | 144 +
 vendor/github.com/containers/storage/LICENSE | 191 +
 vendor/github.com/containers/storage/Makefile | 125 +
 vendor/github.com/containers/storage/NOTICE | 19 +
 .../github.com/containers/storage/README.md | 46 +
 .../github.com/containers/storage/SECURITY.md | 3 +
 vendor/github.com/containers/storage/VERSION | 1 +
 .../containers/storage/containers.go | 657 ++
 .../containers/storage/drivers/aufs/aufs.go | 766 +++
 .../containers/storage/drivers/aufs/dirs.go | 64 +
 .../containers/storage/drivers/aufs/mount.go | 21 +
 .../storage/drivers/aufs/mount_linux.go | 7 +
 .../storage/drivers/aufs/mount_unsupported.go | 12 +
 .../containers/storage/drivers/btrfs/btrfs.go | 682 ++
 .../drivers/btrfs/dummy_unsupported.go | 3 +
 .../storage/drivers/btrfs/version.go | 26 +
 .../storage/drivers/btrfs/version_none.go | 14 +
 .../containers/storage/drivers/chown.go | 133 +
 .../containers/storage/drivers/chown_unix.go | 75 +
 .../storage/drivers/chown_windows.go | 14 +
 .../containers/storage/drivers/chroot_unix.go | 21 +
 .../storage/drivers/chroot_windows.go | 15 +
 .../storage/drivers/copy/copy_linux.go | 306 +
 .../storage/drivers/copy/copy_unsupported.go | 40 +
 .../containers/storage/drivers/counter.go | 63 +
 .../storage/drivers/devmapper/device_setup.go | 248 +
 .../storage/drivers/devmapper/deviceset.go | 2869 ++++++++
 .../drivers/devmapper/devmapper_doc.go | 108 +
 .../storage/drivers/devmapper/driver.go | 273 +
 .../storage/drivers/devmapper/jsoniter.go | 5 +
 .../storage/drivers/devmapper/mount.go | 88 +
 .../containers/storage/drivers/driver.go | 408 ++
 .../storage/drivers/driver_freebsd.go | 21 +
 .../storage/drivers/driver_linux.go | 194 +
 .../storage/drivers/driver_solaris.go | 96 +
 .../storage/drivers/driver_unsupported.go | 15 +
 .../storage/drivers/driver_windows.go | 14 +
 .../containers/storage/drivers/fsdiff.go | 220 +
 .../containers/storage/drivers/jsoniter.go | 5 +
 .../storage/drivers/overlay/check.go | 220 +
 .../storage/drivers/overlay/check_115.go | 42 +
 .../storage/drivers/overlay/check_116.go | 42 +
 .../storage/drivers/overlay/jsoniter.go | 5 +
 .../storage/drivers/overlay/mount.go | 88 +
 .../storage/drivers/overlay/overlay.go | 2103 ++++++
 .../storage/drivers/overlay/overlay_cgo.go | 21 +
 .../storage/drivers/overlay/overlay_nocgo.go | 16 +
 .../drivers/overlay/overlay_unsupported.go | 7 +
 .../storage/drivers/overlay/randomid.go | 81 +
 .../drivers/overlayutils/overlayutils.go | 20 +
 .../storage/drivers/quota/projectquota.go | 404 ++
 .../drivers/quota/projectquota_unsupported.go | 33 +
 .../storage/drivers/register/register_aufs.go | 8 +
 .../drivers/register/register_btrfs.go | 8 +
 .../drivers/register/register_devicemapper.go | 8 +
 .../drivers/register/register_overlay.go | 8 +
 .../storage/drivers/register/register_vfs.go | 6 +
 .../drivers/register/register_windows.go | 6 +
 .../storage/drivers/register/register_zfs.go | 8 +
 .../containers/storage/drivers/template.go | 52 +
 .../storage/drivers/vfs/copy_linux.go | 7 +
 .../storage/drivers/vfs/copy_unsupported.go | 9 +
 .../containers/storage/drivers/vfs/driver.go | 299 +
 .../drivers/windows/jsoniter_windows.go | 5 +
 .../storage/drivers/windows/windows.go | 1006 +++
 .../storage/drivers/zfs/MAINTAINERS | 2 +
 .../containers/storage/drivers/zfs/zfs.go | 512 ++
 .../storage/drivers/zfs/zfs_freebsd.go | 39 +
 .../storage/drivers/zfs/zfs_linux.go | 29 +
 .../storage/drivers/zfs/zfs_unsupported.go | 11 +
 .../github.com/containers/storage/errors.go | 63 +
 vendor/github.com/containers/storage/go.mod | 34 +
 vendor/github.com/containers/storage/go.sum | 1069 +++
 vendor/github.com/containers/storage/idset.go | 220 +
 .../github.com/containers/storage/images.go | 843 +++
 .../github.com/containers/storage/jsoniter.go | 5 +
 .../github.com/containers/storage/layers.go | 1895 +++++
 .../containers/storage/lockfile_compat.go | 15 +
 .../containers/storage/pkg/archive/README.md | 1 +
 .../containers/storage/pkg/archive/archive.go | 1520 ++++
 .../storage/pkg/archive/archive_110.go | 22 +
 .../storage/pkg/archive/archive_19.go | 13 +
 .../storage/pkg/archive/archive_freebsd.go | 125 +
 .../storage/pkg/archive/archive_linux.go | 191 +
 .../storage/pkg/archive/archive_other.go | 11 +
 .../storage/pkg/archive/archive_unix.go | 120 +
 .../storage/pkg/archive/archive_windows.go | 79 +
 .../storage/pkg/archive/archive_zstd.go | 41 +
 .../containers/storage/pkg/archive/changes.go | 500 ++
 .../storage/pkg/archive/changes_linux.go | 401 ++
 .../storage/pkg/archive/changes_other.go | 99 +
 .../storage/pkg/archive/changes_unix.go | 50 +
 .../storage/pkg/archive/changes_windows.go | 30 +
 .../containers/storage/pkg/archive/copy.go | 460 ++
 .../storage/pkg/archive/copy_unix.go | 11 +
 .../storage/pkg/archive/copy_windows.go | 9 +
 .../containers/storage/pkg/archive/diff.go | 257 +
 .../storage/pkg/archive/time_linux.go | 16 +
 .../storage/pkg/archive/time_unsupported.go | 16 +
 .../storage/pkg/archive/whiteouts.go | 23 +
 .../containers/storage/pkg/archive/wrap.go | 59 +
 .../storage/pkg/chrootarchive/archive.go | 186 +
 .../storage/pkg/chrootarchive/archive_unix.go | 207 +
 .../pkg/chrootarchive/archive_windows.go | 29 +
 .../storage/pkg/chrootarchive/chroot_linux.go | 114 +
 .../storage/pkg/chrootarchive/chroot_unix.go | 16 +
 .../storage/pkg/chrootarchive/diff.go | 23 +
 .../storage/pkg/chrootarchive/diff_unix.go | 129 +
 .../storage/pkg/chrootarchive/diff_windows.go | 45 +
 .../storage/pkg/chrootarchive/init_unix.go | 29 +
 .../storage/pkg/chrootarchive/init_windows.go | 4 +
 .../storage/pkg/chrootarchive/jsoniter.go | 5 +
 .../storage/pkg/chunked/cache_linux.go | 630 ++
 .../storage/pkg/chunked/compression.go | 277 +
 .../pkg/chunked/compressor/compressor.go | 454 ++
 .../storage/pkg/chunked/compressor/rollsum.go | 81 +
 .../pkg/chunked/internal/compression.go | 184 +
 .../containers/storage/pkg/chunked/storage.go | 26 +
 .../storage/pkg/chunked/storage_linux.go | 1774 +++++
 .../pkg/chunked/storage_unsupported.go | 16 +
 .../containers/storage/pkg/config/config.go | 337 +
 .../storage/pkg/devicemapper/devmapper.go | 821 +++
 .../storage/pkg/devicemapper/devmapper_log.go | 121 +
 .../pkg/devicemapper/devmapper_wrapper.go | 252 +
 .../devmapper_wrapper_deferred_remove.go | 31 +
 .../devicemapper/devmapper_wrapper_dynamic.go | 6 +
 .../devmapper_wrapper_no_deferred_remove.go | 15 +
 .../devicemapper/devmapper_wrapper_static.go | 6 +
 .../storage/pkg/devicemapper/ioctl.go | 28 +
 .../storage/pkg/devicemapper/log.go | 11 +
 .../storage/pkg/directory/directory.go | 32 +
 .../storage/pkg/directory/directory_unix.go | 60 +
 .../pkg/directory/directory_windows.go | 49 +
 .../storage/pkg/dmesg/dmesg_linux.go | 20 +
 .../storage/pkg/fileutils/fileutils.go | 372 +
 .../storage/pkg/fileutils/fileutils_darwin.go | 27 +
 .../pkg/fileutils/fileutils_solaris.go | 7 +
 .../storage/pkg/fileutils/fileutils_unix.go | 22 +
 .../pkg/fileutils/fileutils_windows.go | 7 +
 .../storage/pkg/fsutils/fsutils_linux.go | 88 +
 .../containers/storage/pkg/homedir/homedir.go | 52 +
 .../storage/pkg/homedir/homedir_others.go | 20 +
 .../storage/pkg/homedir/homedir_unix.go | 95 +
 .../storage/pkg/homedir/homedir_windows.go | 32 +
 .../containers/storage/pkg/idtools/idtools.go | 358 +
 .../storage/pkg/idtools/idtools_supported.go | 71 +
 .../storage/pkg/idtools/idtools_unix.go | 213 +
 .../pkg/idtools/idtools_unsupported.go | 11 +
 .../storage/pkg/idtools/idtools_windows.go | 23 +
 .../containers/storage/pkg/idtools/parser.go | 59 +
 .../storage/pkg/idtools/usergroupadd_linux.go | 164 +
 .../pkg/idtools/usergroupadd_unsupported.go | 12 +
 .../storage/pkg/idtools/utils_unix.go | 32 +
 .../containers/storage/pkg/ioutils/buffer.go | 51 +
 .../storage/pkg/ioutils/bytespipe.go | 186 +
 .../storage/pkg/ioutils/fswriters.go | 195 +
 .../storage/pkg/ioutils/fswriters_linux.go | 11 +
 .../pkg/ioutils/fswriters_unsupported.go | 11 +
 .../containers/storage/pkg/ioutils/readers.go | 171 +
 .../storage/pkg/ioutils/temp_unix.go | 10 +
 .../storage/pkg/ioutils/temp_windows.go | 18 +
 .../storage/pkg/ioutils/writeflusher.go | 92 +
 .../containers/storage/pkg/ioutils/writers.go | 66 +
 .../containers/storage/pkg/locker/README.md | 65 +
 .../containers/storage/pkg/locker/locker.go | 112 +
 .../storage/pkg/lockfile/lockfile.go | 107 +
 .../storage/pkg/lockfile/lockfile_unix.go | 262 +
 .../storage/pkg/lockfile/lockfile_windows.go | 75 +
 .../storage/pkg/longpath/longpath.go | 26 +
 .../storage/pkg/loopback/attach_loopback.go | 157 +
 .../containers/storage/pkg/loopback/ioctl.go | 53 +
 .../storage/pkg/loopback/loop_wrapper.go | 52 +
 .../storage/pkg/loopback/loopback.go | 63 +
 .../pkg/loopback/loopback_unsupported.go | 1 +
 .../containers/storage/pkg/mount/flags.go | 149 +
 .../storage/pkg/mount/flags_linux.go | 87 +
 .../storage/pkg/mount/flags_unsupported.go | 31 +
 .../containers/storage/pkg/mount/mount.go | 110 +
 .../storage/pkg/mount/mounter_freebsd.go | 54 +
 .../storage/pkg/mount/mounter_linux.go | 74 +
 .../storage/pkg/mount/mounter_unsupported.go | 7 +
 .../containers/storage/pkg/mount/mountinfo.go | 13 +
 .../storage/pkg/mount/mountinfo_linux.go | 5 +
 .../storage/pkg/mount/sharedsubtree_linux.go | 64 +
 .../storage/pkg/mount/unmount_unix.go | 22 +
 .../storage/pkg/mount/unmount_unsupported.go | 7 +
 .../storage/pkg/parsers/kernel/kernel.go | 74 +
 .../pkg/parsers/kernel/kernel_darwin.go | 56 +
 .../storage/pkg/parsers/kernel/kernel_unix.go | 45 +
 .../pkg/parsers/kernel/kernel_windows.go | 70 +
 .../storage/pkg/parsers/kernel/uname_linux.go | 17 +
 .../pkg/parsers/kernel/uname_solaris.go | 14 +
 .../pkg/parsers/kernel/uname_unsupported.go | 18 +
 .../containers/storage/pkg/parsers/parsers.go | 69 +
 .../containers/storage/pkg/pools/pools.go | 119 +
 .../containers/storage/pkg/promise/promise.go | 11 +
 .../containers/storage/pkg/reexec/README.md | 5 +
 .../storage/pkg/reexec/command_linux.go | 34 +
 .../storage/pkg/reexec/command_unix.go | 32 +
 .../storage/pkg/reexec/command_unsupported.go | 20 +
 .../storage/pkg/reexec/command_windows.go | 34 +
 .../containers/storage/pkg/reexec/reexec.go | 66 +
 .../containers/storage/pkg/stringid/README.md | 1 +
 .../storage/pkg/stringid/stringid.go | 99 +
 .../storage/pkg/stringutils/README.md | 1 +
 .../storage/pkg/stringutils/stringutils.go | 110 +
 .../containers/storage/pkg/system/chmod.go | 17 +
 .../containers/storage/pkg/system/chtimes.go | 35 +
 .../storage/pkg/system/chtimes_unix.go | 14 +
 .../storage/pkg/system/chtimes_windows.go | 28 +
 .../containers/storage/pkg/system/errors.go | 10 +
 .../containers/storage/pkg/system/exitcode.go | 33 +
 .../containers/storage/pkg/system/init.go | 22 +
 .../storage/pkg/system/init_windows.go | 17 +
 .../containers/storage/pkg/system/lchown.go | 20 +
 .../storage/pkg/system/lcow_unix.go | 8 +
 .../storage/pkg/system/lcow_windows.go | 6 +
 .../storage/pkg/system/lstat_unix.go | 20 +
 .../storage/pkg/system/lstat_windows.go | 14 +
 .../containers/storage/pkg/system/meminfo.go | 17 +
 .../storage/pkg/system/meminfo_linux.go | 65 +
 .../storage/pkg/system/meminfo_solaris.go | 129 +
 .../storage/pkg/system/meminfo_unsupported.go | 8 +
 .../storage/pkg/system/meminfo_windows.go | 45 +
 .../containers/storage/pkg/system/mknod.go | 22 +
 .../storage/pkg/system/mknod_freebsd.go | 22 +
 .../storage/pkg/system/mknod_windows.go | 13 +
 .../containers/storage/pkg/system/path.go | 21 +
 .../storage/pkg/system/path_unix.go | 9 +
 .../storage/pkg/system/path_windows.go | 33 +
 .../storage/pkg/system/process_unix.go | 24 +
 .../containers/storage/pkg/system/rm.go | 79 +
 .../storage/pkg/system/stat_darwin.go | 13 +
 .../storage/pkg/system/stat_freebsd.go | 13 +
 .../storage/pkg/system/stat_linux.go | 19 +
 .../storage/pkg/system/stat_openbsd.go | 13 +
 .../storage/pkg/system/stat_solaris.go | 13 +
 .../storage/pkg/system/stat_unix.go | 74 +
 .../storage/pkg/system/stat_windows.go | 63 +
 .../storage/pkg/system/syscall_unix.go | 25 +
 .../storage/pkg/system/syscall_windows.go | 127 +
 .../containers/storage/pkg/system/umask.go | 13 +
 .../storage/pkg/system/umask_windows.go | 9 +
 .../storage/pkg/system/utimes_freebsd.go | 24 +
 .../storage/pkg/system/utimes_linux.go | 25 +
 .../storage/pkg/system/utimes_unsupported.go | 10 +
 .../storage/pkg/system/xattrs_linux.go | 87 +
 .../storage/pkg/system/xattrs_unsupported.go | 31 +
 .../storage/pkg/tarlog/tarlogger.go | 66 +
 .../storage/pkg/truncindex/truncindex.go | 139 +
 .../storage/pkg/unshare/getenv_linux_cgo.go | 22 +
 .../storage/pkg/unshare/getenv_linux_nocgo.go | 11 +
 .../containers/storage/pkg/unshare/unshare.c | 378 +
 .../containers/storage/pkg/unshare/unshare.go | 56 +
 .../storage/pkg/unshare/unshare_cgo.go | 10 +
 .../storage/pkg/unshare/unshare_gccgo.go | 25 +
 .../storage/pkg/unshare/unshare_linux.go | 605 ++
 .../pkg/unshare/unshare_unsupported.go | 45 +
 .../pkg/unshare/unshare_unsupported_cgo.go | 10 +
 .../containers/storage/storage.conf | 210 +
 vendor/github.com/containers/storage/store.go | 3753 ++++++++++
 .../storage/types/default_override_test.conf | 11 +
 .../containers/storage/types/errors.go | 58 +
 .../containers/storage/types/idmappings.go | 102 +
 .../containers/storage/types/options.go | 461 ++
 .../storage/types/storage_broken.conf | 18 +
 .../storage/types/storage_test.conf | 35 +
 .../containers/storage/types/utils.go | 213 +
 .../github.com/containers/storage/userns.go | 302 +
 vendor/github.com/containers/storage/utils.go | 74 +
 .../coreos/go-systemd/v22/dbus/dbus.go | 261 +
 .../coreos/go-systemd/v22/dbus/methods.go | 830 +++
 .../coreos/go-systemd/v22/dbus/properties.go | 237 +
 .../coreos/go-systemd/v22/dbus/set.go | 47 +
 .../go-systemd/v22/dbus/subscription.go | 333 +
 .../go-systemd/v22/dbus/subscription_set.go | 57 +
 .../cpuguy83/go-md2man/v2/md2man/roff.go | 105 +-
 .../cyphar/filepath-securejoin/.travis.yml | 8 +-
 .../cyphar/filepath-securejoin/README.md | 20 +-
 .../cyphar/filepath-securejoin/VERSION | 2 +-
 .../cyphar/filepath-securejoin/go.mod | 3 +
 .../cyphar/filepath-securejoin/join.go | 25 +-
 .../cyphar/filepath-securejoin/vendor.conf | 1 -
 .../disiqueira/gotree/v3/.gitignore | 137 +
 .../disiqueira/gotree/v3/.travis.yml | 11 +
 .../github.com/disiqueira/gotree/v3/LICENSE | 21 +
 .../github.com/disiqueira/gotree/v3/README.md | 104 +
 .../disiqueira/gotree/v3/_config.yml | 1 +
 vendor/github.com/disiqueira/gotree/v3/go.mod | 3 +
 .../disiqueira/gotree/v3/gotree-logo.png | Bin 0 -> 24183 bytes
 .../github.com/disiqueira/gotree/v3/gotree.go | 129 +
 .../docker/distribution/.golangci.yml | 20 +
 .../docker/distribution/.gometalinter.json | 16 -
 .../github.com/docker/distribution/.mailmap | 17 +
 .../docker/distribution/.travis.yml | 51 -
 .../github.com/docker/distribution/Dockerfile | 53 +-
 .../github.com/docker/distribution/Makefile | 2 +-
 .../github.com/docker/distribution/README.md | 2 +-
 .../github.com/docker/distribution/blobs.go | 2 +-
 .../docker/distribution/docker-bake.hcl | 65 +
 .../distribution/manifest/schema2/manifest.go | 2 +-
 .../docker/distribution/manifests.go | 2 +-
 .../distribution/reference/normalize.go | 29 +
 .../distribution/reference/reference.go | 2 +-
 .../registry/api/errcode/errors.go | 6 +-
 .../distribution/registry/api/v2/urls.go | 12 -
 .../client/auth/challenge/authchallenge.go | 4 +-
 .../registry/client/repository.go | 9 +-
 .../docker/distribution/vendor.conf | 4 +-
 .../docker/docker-credential-helpers/LICENSE | 20 +
 .../client/client.go | 121 +
 .../client/command.go | 57 +
 .../credentials/credentials.go | 186 +
 .../credentials/error.go | 102 +
 .../credentials/helper.go | 14 +
 .../credentials/version.go | 4 +
 vendor/github.com/docker/docker/AUTHORS | 153 +-
 vendor/github.com/docker/docker/api/common.go | 2 +-
 .../github.com/docker/docker/api/swagger.yaml | 551 +-
 .../docker/docker/api/types/client.go | 15 +-
 .../docker/api/types/container/host_config.go | 69 +-
 .../docker/docker/api/types/events/events.go | 47 +-
 .../docker/docker/api/types/types.go | 30 +-
 .../docker/api/types/versions/compare.go | 3 -
 .../docker/docker/client/build_cancel.go | 2 +-
 .../github.com/docker/docker/client/client.go | 15 +
 .../docker/docker/client/config_create.go | 2 +-
 .../docker/docker/client/config_inspect.go | 4 +-
 .../docker/docker/client/config_remove.go | 2 +-
 .../docker/docker/client/config_update.go | 2 +-
 .../docker/docker/client/container_commit.go | 2 +-
 .../docker/docker/client/container_copy.go | 2 +-
 .../docker/docker/client/container_create.go | 2 +-
 .../docker/docker/client/container_inspect.go | 4 +-
 .../docker/docker/client/container_restart.go | 2 +-
 .../docker/docker/client/container_update.go | 2 +-
 .../docker/docker/client/disk_usage.go | 19 +-
 .../docker/client/distribution_inspect.go | 2 +-
 .../docker/docker/client/image_build.go | 4 +-
 .../docker/docker/client/image_create.go | 2 +-
 .../docker/docker/client/image_import.go | 2 +-
 .../docker/docker/client/image_inspect.go | 4 +-
 .../docker/docker/client/image_search.go | 2 +-
 .../docker/docker/client/interface.go | 2 +-
 .../docker/docker/client/network_inspect.go | 4 +-
 .../docker/docker/client/node_inspect.go | 4 +-
 .../docker/docker/client/plugin_inspect.go | 4 +-
 .../docker/docker/client/request.go | 22 +-
 .../docker/docker/client/secret_create.go | 2 +-
 .../docker/docker/client/secret_inspect.go | 4 +-
 .../docker/docker/client/secret_remove.go | 2 +-
 .../docker/docker/client/secret_update.go | 2 +-
 .../docker/docker/client/service_create.go | 2 +-
 .../docker/docker/client/service_inspect.go | 4 +-
 .../docker/docker/client/task_inspect.go | 6 +-
 .../docker/docker/client/volume_inspect.go | 4 +-
 .../docker/docker/errdefs/http_helpers.go | 138 -
 .../docker/docker/pkg/archive/README.md | 1 +
 .../docker/docker/pkg/archive/archive.go | 1322 ++++
 .../docker/pkg/archive/archive_linux.go | 100 +
.../docker/pkg/archive/archive_other.go | 8 + .../docker/docker/pkg/archive/archive_unix.go | 116 + .../docker/pkg/archive/archive_windows.go | 67 + .../docker/docker/pkg/archive/changes.go | 445 ++ .../docker/pkg/archive/changes_linux.go | 286 + .../docker/pkg/archive/changes_other.go | 98 + .../docker/docker/pkg/archive/changes_unix.go | 44 + .../docker/pkg/archive/changes_windows.go | 34 + .../docker/docker/pkg/archive/copy.go | 480 ++ .../docker/docker/pkg/archive/copy_unix.go | 12 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 260 + .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 17 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/fileutils/fileutils.go | 222 +- .../docker/pkg/fileutils/fileutils_unix.go | 3 +- .../docker/docker/pkg/idtools/idtools.go | 241 + .../docker/docker/pkg/idtools/idtools_unix.go | 296 + .../docker/pkg/idtools/idtools_windows.go | 25 + .../docker/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 13 + .../docker/docker/pkg/idtools/utils_unix.go | 32 + .../docker/docker/pkg/ioutils/bytespipe.go | 14 +- .../docker/docker/pkg/ioutils/fswriters.go | 5 +- .../docker/docker/pkg/ioutils/readers.go | 16 +- .../docker/docker/pkg/ioutils/temp_unix.go | 6 +- .../docker/docker/pkg/ioutils/temp_windows.go | 6 +- .../docker/docker/pkg/pools/pools.go | 137 + .../docker/docker/pkg/stdcopy/stdcopy.go | 190 + .../docker/docker/pkg/system/args_windows.go | 16 + .../docker/docker/pkg/system/chtimes.go | 31 + .../docker/pkg/system/chtimes_nowindows.go | 15 + .../docker/pkg/system/chtimes_windows.go | 26 + .../docker/docker/pkg/system/errors.go | 13 + .../docker/docker/pkg/system/exitcode.go | 19 + .../docker/docker/pkg/system/filesys_unix.go | 68 + .../docker/pkg/system/filesys_windows.go | 292 + .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_windows.go | 29 + .../docker/docker/pkg/system/lcow.go | 49 + .../docker/pkg/system/lcow_unsupported.go | 29 + .../docker/docker/pkg/system/lstat_unix.go | 21 + .../docker/docker/pkg/system/lstat_windows.go | 14 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 71 + .../docker/pkg/system/meminfo_unsupported.go | 9 + .../docker/pkg/system/meminfo_windows.go | 45 + .../docker/docker/pkg/system/mknod.go | 17 + .../docker/docker/pkg/system/mknod_freebsd.go | 14 + .../docker/docker/pkg/system/mknod_unix.go | 14 + .../docker/docker/pkg/system/mknod_windows.go | 11 + .../docker/docker/pkg/system/path.go | 64 + .../docker/docker/pkg/system/path_unix.go | 11 + .../docker/docker/pkg/system/path_windows.go | 27 + .../docker/docker/pkg/system/process_unix.go | 45 + .../docker/pkg/system/process_windows.go | 18 + .../github.com/docker/docker/pkg/system/rm.go | 79 + .../docker/docker/pkg/system/rm_windows.go | 6 + .../docker/docker/pkg/system/stat_bsd.go | 16 + .../docker/docker/pkg/system/stat_darwin.go | 13 + .../docker/docker/pkg/system/stat_linux.go | 20 + .../docker/docker/pkg/system/stat_openbsd.go | 13 + .../docker/docker/pkg/system/stat_solaris.go | 13 + .../docker/docker/pkg/system/stat_unix.go | 67 + .../docker/docker/pkg/system/stat_windows.go | 49 + .../docker/docker/pkg/system/syscall_unix.go | 12 + .../docker/pkg/system/syscall_windows.go | 136 + .../docker/docker/pkg/system/umask.go | 14 + .../docker/docker/pkg/system/umask_windows.go | 7 + 
.../docker/docker/pkg/system/utimes_unix.go | 25 + .../docker/pkg/system/utimes_unsupported.go | 11 + .../docker/docker/pkg/system/xattrs_linux.go | 37 + .../docker/pkg/system/xattrs_unsupported.go | 14 + .../github.com/docker/docker/registry/auth.go | 59 +- .../docker/docker/registry/config.go | 4 + .../docker/docker/registry/endpoint_v1.go | 50 +- .../docker/docker/registry/registry.go | 14 +- .../docker/docker/registry/service.go | 25 +- .../docker/docker/registry/service_v2.go | 2 +- .../docker/docker/registry/types.go | 5 +- .../docker/go-connections/nat/nat.go | 28 +- .../docker/go-connections/nat/sort.go | 4 +- .../docker/go-connections/sockets/proxy.go | 39 +- .../docker/go-connections/sockets/sockets.go | 12 - .../go-connections/sockets/sockets_unix.go | 13 +- .../go-connections/sockets/sockets_windows.go | 5 +- .../go-connections/sockets/unix_socket.go | 116 +- .../docker/go-connections/tlsconfig/config.go | 31 +- .../tlsconfig/versions_go113.go | 16 + .../tlsconfig/versions_other.go | 15 + vendor/github.com/docker/libnetwork/LICENSE | 202 + .../docker/libnetwork/resolvconf/README.md | 1 + .../libnetwork/resolvconf/dns/resolvconf.go | 26 + .../libnetwork/resolvconf/resolvconf.go | 285 + .../docker/libnetwork/types/types.go | 649 ++ vendor/github.com/fsnotify/fsnotify/.mailmap | 2 + .../github.com/fsnotify/fsnotify/.travis.yml | 36 - vendor/github.com/fsnotify/fsnotify/AUTHORS | 16 +- .../github.com/fsnotify/fsnotify/CHANGELOG.md | 116 +- vendor/github.com/fsnotify/fsnotify/README.md | 6 +- vendor/github.com/fsnotify/fsnotify/fen.go | 1 + .../github.com/fsnotify/fsnotify/fsnotify.go | 1 + vendor/github.com/fsnotify/fsnotify/go.mod | 4 +- vendor/github.com/fsnotify/fsnotify/go.sum | 4 +- .../github.com/fsnotify/fsnotify/inotify.go | 3 +- .../fsnotify/fsnotify/inotify_poller.go | 1 + vendor/github.com/fsnotify/fsnotify/kqueue.go | 1 + .../fsnotify/fsnotify/open_mode_bsd.go | 1 + .../fsnotify/fsnotify/open_mode_darwin.go | 1 + .../github.com/fsnotify/fsnotify/windows.go | 1 + .../fsouza/go-dockerclient/.gitattributes | 1 + .../fsouza/go-dockerclient/.gitignore | 2 + .../fsouza/go-dockerclient/.golangci.yaml | 8 + .../github.com/fsouza/go-dockerclient/AUTHORS | 209 + .../fsouza/go-dockerclient/DOCKER-LICENSE | 6 + .../github.com/fsouza/go-dockerclient/LICENSE | 23 + .../fsouza/go-dockerclient/Makefile | 30 + .../fsouza/go-dockerclient/README.md | 128 + .../github.com/fsouza/go-dockerclient/auth.go | 385 ++ .../fsouza/go-dockerclient/change.go | 43 + .../fsouza/go-dockerclient/client.go | 1156 ++++ .../fsouza/go-dockerclient/client_unix.go | 32 + .../fsouza/go-dockerclient/client_windows.go | 46 + .../fsouza/go-dockerclient/container.go | 597 ++ .../go-dockerclient/container_archive.go | 58 + .../go-dockerclient/container_attach.go | 74 + .../go-dockerclient/container_changes.go | 28 + .../go-dockerclient/container_commit.go | 46 + .../fsouza/go-dockerclient/container_copy.go | 50 + .../go-dockerclient/container_create.go | 79 + .../go-dockerclient/container_export.go | 37 + .../go-dockerclient/container_inspect.go | 55 + .../fsouza/go-dockerclient/container_kill.go | 46 + .../fsouza/go-dockerclient/container_list.go | 37 + .../fsouza/go-dockerclient/container_logs.go | 58 + .../fsouza/go-dockerclient/container_pause.go | 24 + .../fsouza/go-dockerclient/container_prune.go | 40 + .../go-dockerclient/container_remove.go | 41 + .../go-dockerclient/container_rename.go | 33 + .../go-dockerclient/container_resize.go | 22 + 
.../go-dockerclient/container_restart.go | 64 + .../fsouza/go-dockerclient/container_start.go | 57 + .../fsouza/go-dockerclient/container_stats.go | 215 + .../fsouza/go-dockerclient/container_stop.go | 42 + .../fsouza/go-dockerclient/container_top.go | 40 + .../go-dockerclient/container_unpause.go | 24 + .../go-dockerclient/container_update.go | 43 + .../fsouza/go-dockerclient/container_wait.go | 42 + .../fsouza/go-dockerclient/distribution.go | 27 + .../github.com/fsouza/go-dockerclient/env.go | 172 + .../fsouza/go-dockerclient/event.go | 451 ++ .../github.com/fsouza/go-dockerclient/exec.go | 224 + .../github.com/fsouza/go-dockerclient/go.mod | 18 + .../github.com/fsouza/go-dockerclient/go.sum | 958 +++ .../fsouza/go-dockerclient/image.go | 761 ++ .../github.com/fsouza/go-dockerclient/misc.go | 198 + .../fsouza/go-dockerclient/network.go | 340 + .../fsouza/go-dockerclient/plugin.go | 461 ++ .../fsouza/go-dockerclient/registry_auth.go | 10 + .../fsouza/go-dockerclient/signal.go | 49 + .../fsouza/go-dockerclient/swarm.go | 161 + .../fsouza/go-dockerclient/swarm_configs.go | 175 + .../fsouza/go-dockerclient/swarm_node.go | 134 + .../fsouza/go-dockerclient/swarm_secrets.go | 175 + .../fsouza/go-dockerclient/swarm_service.go | 218 + .../fsouza/go-dockerclient/swarm_task.go | 72 + .../fsouza/go-dockerclient/system.go | 73 + .../github.com/fsouza/go-dockerclient/tar.go | 122 + .../github.com/fsouza/go-dockerclient/tls.go | 116 + .../fsouza/go-dockerclient/volume.go | 194 + vendor/github.com/ghodss/yaml/.gitignore | 20 + vendor/github.com/ghodss/yaml/.travis.yml | 7 + vendor/github.com/ghodss/yaml/LICENSE | 50 + vendor/github.com/ghodss/yaml/README.md | 121 + vendor/github.com/ghodss/yaml/fields.go | 501 ++ vendor/github.com/ghodss/yaml/yaml.go | 277 + .../github.com/godbus/dbus/v5/CONTRIBUTING.md | 50 + vendor/github.com/godbus/dbus/v5/LICENSE | 25 + vendor/github.com/godbus/dbus/v5/MAINTAINERS | 3 + vendor/github.com/godbus/dbus/v5/README.md | 46 + vendor/github.com/godbus/dbus/v5/auth.go | 255 + .../godbus/dbus/v5/auth_anonymous.go | 16 + .../godbus/dbus/v5/auth_external.go | 26 + vendor/github.com/godbus/dbus/v5/auth_sha1.go | 102 + vendor/github.com/godbus/dbus/v5/call.go | 69 + vendor/github.com/godbus/dbus/v5/conn.go | 983 +++ .../github.com/godbus/dbus/v5/conn_darwin.go | 37 + .../github.com/godbus/dbus/v5/conn_other.go | 93 + vendor/github.com/godbus/dbus/v5/conn_unix.go | 17 + .../github.com/godbus/dbus/v5/conn_windows.go | 15 + vendor/github.com/godbus/dbus/v5/dbus.go | 432 ++ vendor/github.com/godbus/dbus/v5/decoder.go | 292 + .../godbus/dbus/v5/default_handler.go | 342 + vendor/github.com/godbus/dbus/v5/doc.go | 69 + vendor/github.com/godbus/dbus/v5/encoder.go | 235 + vendor/github.com/godbus/dbus/v5/export.go | 464 ++ vendor/github.com/godbus/dbus/v5/go.mod | 3 + vendor/github.com/godbus/dbus/v5/go.sum | 0 vendor/github.com/godbus/dbus/v5/homedir.go | 28 + .../godbus/dbus/v5/homedir_dynamic.go | 15 + .../godbus/dbus/v5/homedir_static.go | 45 + vendor/github.com/godbus/dbus/v5/match.go | 89 + vendor/github.com/godbus/dbus/v5/message.go | 384 ++ vendor/github.com/godbus/dbus/v5/object.go | 174 + vendor/github.com/godbus/dbus/v5/sequence.go | 24 + .../godbus/dbus/v5/sequential_handler.go | 125 + .../godbus/dbus/v5/server_interfaces.go | 107 + vendor/github.com/godbus/dbus/v5/sig.go | 293 + .../godbus/dbus/v5/transport_darwin.go | 6 + .../godbus/dbus/v5/transport_generic.go | 52 +
.../godbus/dbus/v5/transport_nonce_tcp.go | 39 + .../godbus/dbus/v5/transport_tcp.go | 41 + .../godbus/dbus/v5/transport_unix.go | 214 + .../dbus/v5/transport_unixcred_dragonfly.go | 95 + .../dbus/v5/transport_unixcred_freebsd.go | 92 + .../dbus/v5/transport_unixcred_linux.go | 25 + .../dbus/v5/transport_unixcred_netbsd.go | 14 + .../dbus/v5/transport_unixcred_openbsd.go | 14 + vendor/github.com/godbus/dbus/v5/variant.go | 150 + .../godbus/dbus/v5/variant_lexer.go | 284 + .../godbus/dbus/v5/variant_parser.go | 817 +++ vendor/github.com/golang/groupcache/LICENSE | 191 + .../github.com/golang/groupcache/lru/lru.go | 133 + .../github.com/google/go-cmp/cmp/compare.go | 17 - .../google/go-cmp/cmp/export_panic.go | 1 + .../google/go-cmp/cmp/export_unsafe.go | 1 + .../go-cmp/cmp/internal/diff/debug_disable.go | 1 + .../go-cmp/cmp/internal/diff/debug_enable.go | 1 + .../cmp/internal/flags/toolchain_legacy.go | 10 - .../cmp/internal/flags/toolchain_recent.go | 10 - .../google/go-cmp/cmp/internal/value/name.go | 7 + .../cmp/internal/value/pointer_purego.go | 1 + .../cmp/internal/value/pointer_unsafe.go | 1 + vendor/github.com/google/go-cmp/cmp/path.go | 2 +- .../google/go-cmp/cmp/report_reflect.go | 3 +- .../google/go-cmp/cmp/report_slices.go | 6 +- vendor/github.com/google/go-intervals/LICENSE | 202 + .../go-intervals/intervalset/intervalset.go | 545 ++ .../intervalset/intervalset_immutable.go | 85 + vendor/github.com/google/uuid/null.go | 118 + vendor/github.com/google/uuid/uuid.go | 45 +- vendor/github.com/google/uuid/version4.go | 27 +- vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 169 + vendor/github.com/hashicorp/errwrap/go.mod | 1 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 150 + .../hashicorp/go-multierror/append.go | 43 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../github.com/hashicorp/go-multierror/go.mod | 5 + .../github.com/hashicorp/go-multierror/go.sum | 2 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 121 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + .../github.com/ishidawataru/sctp/.gitignore | 16 + .../github.com/ishidawataru/sctp/.travis.yml | 29 + .../github.com/ishidawataru/sctp/GO_LICENSE | 27 + vendor/github.com/ishidawataru/sctp/LICENSE | 201 + vendor/github.com/ishidawataru/sctp/NOTICE | 3 + vendor/github.com/ishidawataru/sctp/README.md | 18 + vendor/github.com/ishidawataru/sctp/go.mod | 3 + .../ishidawataru/sctp/ipsock_linux.go | 222 + vendor/github.com/ishidawataru/sctp/sctp.go | 729 ++ .../ishidawataru/sctp/sctp_linux.go | 305 + .../ishidawataru/sctp/sctp_unsupported.go | 98 + vendor/github.com/jinzhu/copier/License | 20 + vendor/github.com/jinzhu/copier/README.md | 132 + vendor/github.com/jinzhu/copier/copier.go | 697 ++ vendor/github.com/jinzhu/copier/errors.go | 10 + vendor/github.com/jinzhu/copier/go.mod | 3 + vendor/github.com/json-iterator/go/README.md | 2 - vendor/github.com/json-iterator/go/go.mod | 2 +- vendor/github.com/json-iterator/go/go.sum | 5 +- .../kevinburke/ssh_config/.travis.yml | 20 - .../kevinburke/ssh_config/README.md | 8 + .../kevinburke/ssh_config/config.go | 174 +- .../kevinburke/ssh_config/validators.go | 24 + 
.../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 25 + .../klauspost/compress/.goreleaser.yml | 141 + vendor/github.com/klauspost/compress/LICENSE | 304 + .../github.com/klauspost/compress/README.md | 452 ++ .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/flate/deflate.go | 911 +++ .../klauspost/compress/flate/dict_decoder.go | 184 + .../klauspost/compress/flate/fast_encoder.go | 233 + .../compress/flate/huffman_bit_writer.go | 1133 +++ .../klauspost/compress/flate/huffman_code.go | 372 + .../compress/flate/huffman_sortByFreq.go | 178 + .../compress/flate/huffman_sortByLiteral.go | 201 + .../klauspost/compress/flate/inflate.go | 1001 +++ .../klauspost/compress/flate/inflate_gen.go | 1002 +++ .../klauspost/compress/flate/level1.go | 179 + .../klauspost/compress/flate/level2.go | 205 + .../klauspost/compress/flate/level3.go | 229 + .../klauspost/compress/flate/level4.go | 212 + .../klauspost/compress/flate/level5.go | 294 + .../klauspost/compress/flate/level6.go | 307 + .../klauspost/compress/flate/regmask_amd64.go | 37 + .../klauspost/compress/flate/regmask_other.go | 40 + .../klauspost/compress/flate/stateless.go | 297 + .../klauspost/compress/flate/token.go | 377 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 168 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 ++ .../klauspost/compress/fse/decompress.go | 374 + .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + vendor/github.com/klauspost/compress/go.mod | 3 + vendor/github.com/klauspost/compress/go.sum | 0 .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 329 + .../klauspost/compress/huff0/bitwriter.go | 210 + .../klauspost/compress/huff0/bytereader.go | 54 + .../klauspost/compress/huff0/compress.go | 720 ++ .../klauspost/compress/huff0/decompress.go | 1387 ++++ .../klauspost/compress/huff0/huff0.go | 335 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 + .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 + .../compress/internal/snapref/encode_other.go | 236 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 143 + .../klauspost/compress/zstd/bitwriter.go | 189 + .../klauspost/compress/zstd/blockdec.go | 724 ++ .../klauspost/compress/zstd/blockenc.go | 871 +++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 130 + .../klauspost/compress/zstd/bytereader.go | 88 + .../klauspost/compress/zstd/decodeheader.go | 230 + .../klauspost/compress/zstd/decoder.go | 555 ++ .../compress/zstd/decoder_options.go | 102 + .../klauspost/compress/zstd/dict.go | 122 + .../klauspost/compress/zstd/enc_base.go | 188 + .../klauspost/compress/zstd/enc_best.go | 558 ++ .../klauspost/compress/zstd/enc_better.go | 1237 ++++ .../klauspost/compress/zstd/enc_dfast.go | 1124 +++ .../klauspost/compress/zstd/enc_fast.go | 898 +++ .../klauspost/compress/zstd/encoder.go | 599 ++ .../compress/zstd/encoder_options.go | 316 + .../klauspost/compress/zstd/framedec.go | 521 ++ 
.../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 385 ++ .../klauspost/compress/zstd/fse_encoder.go | 724 ++ .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 41 + .../klauspost/compress/zstd/history.go | 89 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 58 + .../compress/zstd/internal/xxhash/xxhash.go | 237 + .../zstd/internal/xxhash/xxhash_amd64.s | 216 + .../zstd/internal/xxhash/xxhash_arm64.s | 186 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 77 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/seqdec.go | 492 ++ .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 435 ++ .../github.com/klauspost/compress/zstd/zip.go | 122 + .../klauspost/compress/zstd/zstd.go | 152 + vendor/github.com/klauspost/pgzip/.gitignore | 24 + vendor/github.com/klauspost/pgzip/.travis.yml | 24 + vendor/github.com/klauspost/pgzip/GO_LICENSE | 27 + vendor/github.com/klauspost/pgzip/LICENSE | 21 + vendor/github.com/klauspost/pgzip/README.md | 135 + vendor/github.com/klauspost/pgzip/gunzip.go | 584 ++ vendor/github.com/klauspost/pgzip/gzip.go | 519 ++ .../magiconair/properties/.travis.yml | 5 + .../magiconair/properties/CHANGELOG.md | 23 +- .../properties/{LICENSE => LICENSE.md} | 9 +- .../magiconair/properties/README.md | 1 - .../github.com/magiconair/properties/go.mod | 2 + .../github.com/magiconair/properties/load.go | 5 +- .../magiconair/properties/properties.go | 31 +- .../github.com/manifoldco/promptui/.gitignore | 3 + .../manifoldco/promptui/.golangci.yml | 26 + .../manifoldco/promptui/.travis.yml | 14 + .../manifoldco/promptui/CHANGELOG.md | 130 + .../manifoldco/promptui/CODE_OF_CONDUCT.md | 73 + .../github.com/manifoldco/promptui/LICENSE.md | 29 + .../github.com/manifoldco/promptui/Makefile | 49 + .../github.com/manifoldco/promptui/README.md | 107 + .../github.com/manifoldco/promptui/codes.go | 120 + .../github.com/manifoldco/promptui/cursor.go | 232 + vendor/github.com/manifoldco/promptui/go.mod | 10 + vendor/github.com/manifoldco/promptui/go.sum | 8 + .../manifoldco/promptui/keycodes.go | 29 + .../manifoldco/promptui/keycodes_other.go | 10 + .../manifoldco/promptui/keycodes_windows.go | 10 + .../manifoldco/promptui/list/list.go | 237 + .../github.com/manifoldco/promptui/prompt.go | 341 + .../manifoldco/promptui/promptui.go | 27 + .../promptui/screenbuf/screenbuf.go | 151 + .../github.com/manifoldco/promptui/select.go | 638 ++ .../github.com/manifoldco/promptui/styles.go | 23 + .../manifoldco/promptui/styles_windows.go | 21 + .../github.com/mattn/go-runewidth/.travis.yml | 14 +- .../go-runewidth/{README.mkd => README.md} | 2 +- vendor/github.com/mattn/go-runewidth/go.mod | 2 + vendor/github.com/mattn/go-runewidth/go.sum | 2 + .../github.com/mattn/go-runewidth/go.test.sh | 12 + .../mattn/go-runewidth/runewidth.go | 123 +- .../mattn/go-runewidth/runewidth_posix.go | 5 +- .../mattn/go-runewidth/runewidth_table.go | 426 +- .../mattn/go-shellwords/.travis.yml | 16 + vendor/github.com/mattn/go-shellwords/LICENSE | 21 + .../github.com/mattn/go-shellwords/README.md | 55 + vendor/github.com/mattn/go-shellwords/go.mod | 3 + .../github.com/mattn/go-shellwords/go.test.sh | 12 + .../mattn/go-shellwords/shellwords.go | 317 + .../mattn/go-shellwords/util_posix.go | 29 + 
.../mattn/go-shellwords/util_windows.go | 29 + vendor/github.com/miekg/pkcs11/.gitignore | 3 + vendor/github.com/miekg/pkcs11/LICENSE | 27 + .../github.com/miekg/pkcs11/Makefile.release | 57 + vendor/github.com/miekg/pkcs11/README.md | 68 + vendor/github.com/miekg/pkcs11/error.go | 98 + vendor/github.com/miekg/pkcs11/go.mod | 3 + vendor/github.com/miekg/pkcs11/hsm.db | Bin 0 -> 10240 bytes vendor/github.com/miekg/pkcs11/params.go | 190 + vendor/github.com/miekg/pkcs11/pkcs11.go | 1609 +++++ vendor/github.com/miekg/pkcs11/pkcs11.h | 265 + vendor/github.com/miekg/pkcs11/pkcs11f.h | 939 +++ vendor/github.com/miekg/pkcs11/pkcs11go.h | 33 + vendor/github.com/miekg/pkcs11/pkcs11t.h | 2047 ++++++ vendor/github.com/miekg/pkcs11/release.go | 18 + vendor/github.com/miekg/pkcs11/softhsm.conf | 1 + vendor/github.com/miekg/pkcs11/softhsm2.conf | 4 + vendor/github.com/miekg/pkcs11/types.go | 315 + vendor/github.com/miekg/pkcs11/vendor.go | 127 + vendor/github.com/miekg/pkcs11/zconst.go | 766 +++ vendor/github.com/mistifyio/go-zfs/.gitignore | 1 + .../github.com/mistifyio/go-zfs/.travis.yml | 43 + .../mistifyio/go-zfs/CONTRIBUTING.md | 60 + vendor/github.com/mistifyio/go-zfs/LICENSE | 201 + vendor/github.com/mistifyio/go-zfs/README.md | 54 + .../github.com/mistifyio/go-zfs/Vagrantfile | 34 + vendor/github.com/mistifyio/go-zfs/error.go | 18 + vendor/github.com/mistifyio/go-zfs/utils.go | 360 + .../mistifyio/go-zfs/utils_notsolaris.go | 17 + .../mistifyio/go-zfs/utils_solaris.go | 17 + vendor/github.com/mistifyio/go-zfs/zfs.go | 452 ++ vendor/github.com/mistifyio/go-zfs/zpool.go | 112 + .../mitchellh/mapstructure/.travis.yml | 9 - .../mitchellh/mapstructure/CHANGELOG.md | 30 + .../mitchellh/mapstructure/decode_hooks.go | 72 +- .../mitchellh/mapstructure/mapstructure.go | 212 +- vendor/github.com/moby/sys/mount/LICENSE | 202 + vendor/github.com/moby/sys/mount/doc.go | 4 + vendor/github.com/moby/sys/mount/flags_bsd.go | 45 + .../github.com/moby/sys/mount/flags_linux.go | 87 + .../github.com/moby/sys/mount/flags_unix.go | 139 + vendor/github.com/moby/sys/mount/go.mod | 8 + vendor/github.com/moby/sys/mount/go.sum | 5 + .../github.com/moby/sys/mount/mount_errors.go | 46 + .../github.com/moby/sys/mount/mount_unix.go | 87 + .../github.com/moby/sys/mount/mounter_bsd.go | 61 + .../moby/sys/mount/mounter_linux.go | 73 + .../moby/sys/mount/mounter_unsupported.go | 7 + .../moby/sys/mount/sharedsubtree_linux.go | 73 + vendor/github.com/moby/sys/mountinfo/LICENSE | 202 + vendor/github.com/moby/sys/mountinfo/doc.go | 44 + vendor/github.com/moby/sys/mountinfo/go.mod | 5 + vendor/github.com/moby/sys/mountinfo/go.sum | 2 + .../moby/sys/mountinfo/mounted_linux.go | 57 + .../moby/sys/mountinfo/mounted_unix.go | 54 + .../moby/sys/mountinfo/mountinfo.go | 67 + .../moby/sys/mountinfo/mountinfo_bsd.go | 72 + .../moby/sys/mountinfo/mountinfo_filters.go | 63 + .../moby/sys/mountinfo/mountinfo_linux.go | 214 + .../sys/mountinfo/mountinfo_unsupported.go | 19 + .../moby/sys/mountinfo/mountinfo_windows.go | 10 + vendor/github.com/moby/term/go.mod | 4 +- vendor/github.com/moby/term/go.sum | 8 +- .../github.com/modern-go/reflect2/.travis.yml | 2 +- .../github.com/modern-go/reflect2/Gopkg.lock | 8 +- .../github.com/modern-go/reflect2/Gopkg.toml | 4 - vendor/github.com/modern-go/reflect2/go.mod | 3 + .../modern-go/reflect2/go_above_118.go | 23 + .../modern-go/reflect2/go_above_17.go | 8 - 
.../modern-go/reflect2/go_above_19.go | 3 + .../modern-go/reflect2/go_below_118.go | 21 + .../modern-go/reflect2/go_below_17.go | 9 - .../modern-go/reflect2/go_below_19.go | 14 - .../github.com/modern-go/reflect2/reflect2.go | 20 +- vendor/github.com/modern-go/reflect2/test.sh | 12 - .../github.com/modern-go/reflect2/type_map.go | 51 +- .../modern-go/reflect2/unsafe_link.go | 26 +- .../modern-go/reflect2/unsafe_map.go | 8 - vendor/github.com/onsi/ginkgo/CHANGELOG.md | 17 + vendor/github.com/onsi/ginkgo/README.md | 13 +- vendor/github.com/onsi/ginkgo/RELEASING.md | 11 +- .../github.com/onsi/ginkgo/config/config.go | 2 +- vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 15 +- vendor/github.com/onsi/ginkgo/go.mod | 4 +- .../internal/testingtproxy/testing_t_proxy.go | 5 + .../onsi/ginkgo/types/deprecation_support.go | 24 +- vendor/github.com/onsi/gomega/.travis.yml | 18 - vendor/github.com/onsi/gomega/CHANGELOG.md | 90 + vendor/github.com/onsi/gomega/Dockerfile | 1 - vendor/github.com/onsi/gomega/Makefile | 33 - vendor/github.com/onsi/gomega/README.md | 2 +- vendor/github.com/onsi/gomega/RELEASING.md | 11 +- .../onsi/gomega/docker-compose.yaml | 10 - vendor/github.com/onsi/gomega/gexec/build.go | 25 +- vendor/github.com/onsi/gomega/go.mod | 12 +- vendor/github.com/onsi/gomega/go.sum | 24 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 557 +- .../onsi/gomega/internal/assertion.go | 150 + .../gomega/internal/assertion/assertion.go | 109 - .../onsi/gomega/internal/async_assertion.go | 235 + .../asyncassertion/async_assertion.go | 198 - .../onsi/gomega/internal/duration_bundle.go | 71 + .../github.com/onsi/gomega/internal/gomega.go | 102 + .../onsi/gomega/internal/gutil/post_ioutil.go | 48 + .../gomega/internal/gutil/using_ioutil.go | 47 + .../internal/oraclematcher/oracle_matcher.go | 25 - .../testingtsupport/testing_t_support.go | 60 - vendor/github.com/onsi/gomega/matchers.go | 112 +- vendor/github.com/onsi/gomega/matchers/and.go | 5 +- .../matchers/contain_element_matcher.go | 120 +- .../onsi/gomega/matchers/have_each_matcher.go | 65 + .../onsi/gomega/matchers/have_field.go | 87 + .../gomega/matchers/have_http_body_matcher.go | 101 + .../have_http_header_with_value_matcher.go | 81 + .../matchers/have_http_status_matcher.go | 72 +- .../onsi/gomega/matchers/have_value.go | 54 + vendor/github.com/onsi/gomega/matchers/not.go | 3 +- vendor/github.com/onsi/gomega/matchers/or.go | 5 +- .../onsi/gomega/matchers/with_transform.go | 19 +- vendor/github.com/onsi/gomega/tools | 8 + vendor/github.com/onsi/gomega/types/types.go | 77 +- .../image-spec/specs-go/v1/annotations.go | 6 + .../image-spec/specs-go/v1/config.go | 11 + .../image-spec/specs-go/v1/mediatype.go | 9 + .../image-spec/specs-go/version.go | 2 +- vendor/github.com/opencontainers/runc/LICENSE | 191 + vendor/github.com/opencontainers/runc/NOTICE | 17 + .../runc/libcontainer/apparmor/apparmor.go | 16 + .../libcontainer/apparmor/apparmor_linux.go | 68 + .../apparmor/apparmor_unsupported.go | 15 + .../runc/libcontainer/devices/device.go | 174 + .../runc/libcontainer/devices/device_unix.go | 120 + .../runc/libcontainer/user/lookup_unix.go | 157 + .../runc/libcontainer/user/user.go | 605 ++ .../runc/libcontainer/user/user_fuzzer.go | 43 + .../runc/libcontainer/userns/userns.go | 5 + .../runc/libcontainer/userns/userns_fuzzer.go | 16 + .../runc/libcontainer/userns/userns_linux.go | 37 + .../libcontainer/userns/userns_unsupported.go | 18 + 
.../runc/libcontainer/utils/cmsg.go | 96 + .../runc/libcontainer/utils/utils.go | 167 + .../runc/libcontainer/utils/utils_unix.go | 69 + .../opencontainers/runtime-spec/LICENSE | 191 + .../runtime-spec/specs-go/config.go | 700 ++ .../runtime-spec/specs-go/state.go | 56 + .../runtime-spec/specs-go/version.go | 18 + .../opencontainers/runtime-tools/LICENSE | 191 + .../runtime-tools/error/error.go | 122 + .../runtime-tools/filepath/abs.go | 48 + .../runtime-tools/filepath/ancestor.go | 32 + .../runtime-tools/filepath/clean.go | 74 + .../runtime-tools/filepath/doc.go | 6 + .../runtime-tools/filepath/join.go | 9 + .../runtime-tools/filepath/separator.go | 9 + .../runtime-tools/generate/config.go | 208 + .../runtime-tools/generate/generate.go | 1779 +++++ .../runtime-tools/generate/seccomp/consts.go | 12 + .../generate/seccomp/parse_action.go | 135 + .../generate/seccomp/parse_architecture.go | 55 + .../generate/seccomp/parse_arguments.go | 73 + .../generate/seccomp/parse_remove.go | 52 + .../generate/seccomp/seccomp_default.go | 576 ++ .../generate/seccomp/seccomp_default_linux.go | 15 + .../seccomp/seccomp_default_unsupported.go | 15 + .../generate/seccomp/syscall_compare.go | 140 + .../runtime-tools/specerror/bundle.go | 29 + .../runtime-tools/specerror/config-linux.go | 134 + .../runtime-tools/specerror/config-windows.go | 32 + .../runtime-tools/specerror/config.go | 188 + .../runtime-tools/specerror/error.go | 152 + .../runtime-tools/specerror/runtime-linux.go | 23 + .../runtime-tools/specerror/runtime.go | 179 + .../runtime-tools/validate/validate.go | 838 +++ .../runtime-tools/validate/validate_linux.go | 237 + .../validate/validate_unsupported.go | 17 + .../github.com/opencontainers/selinux/LICENSE | 201 + .../opencontainers/selinux/go-selinux/doc.go | 14 + .../selinux/go-selinux/label/label.go | 97 + .../selinux/go-selinux/label/label_linux.go | 196 + .../selinux/go-selinux/label/label_stub.go | 49 + .../selinux/go-selinux/rchcon.go | 22 + .../selinux/go-selinux/rchcon_go115.go | 21 + .../selinux/go-selinux/selinux.go | 304 + .../selinux/go-selinux/selinux_linux.go | 1262 ++++ .../selinux/go-selinux/selinux_stub.go | 164 + .../selinux/go-selinux/xattrs_linux.go | 71 + .../selinux/pkg/pwalk/README.md | 48 + .../opencontainers/selinux/pkg/pwalk/pwalk.go | 115 + .../selinux/pkg/pwalkdir/README.md | 54 + .../selinux/pkg/pwalkdir/pwalkdir.go | 116 + .../openshift/imagebuilder/.gitignore | 1 + .../openshift/imagebuilder/.travis.yml | 16 + .../github.com/openshift/imagebuilder/LICENSE | 192 + .../openshift/imagebuilder/Makefile | 11 + .../github.com/openshift/imagebuilder/OWNERS | 6 + .../openshift/imagebuilder/README.md | 109 + .../openshift/imagebuilder/builder.go | 636 ++ .../openshift/imagebuilder/constants.go | 9 + .../openshift/imagebuilder/dispatchers.go | 694 ++ .../github.com/openshift/imagebuilder/doc.go | 6 + .../openshift/imagebuilder/dockerfile/NOTICE | 26 + .../dockerfile/command/command.go | 46 + .../dockerfile/parser/line_parsers.go | 399 ++ .../imagebuilder/dockerfile/parser/parser.go | 355 + .../dockerfile/parser/split_command.go | 118 + .../openshift/imagebuilder/evaluator.go | 159 + .../github.com/openshift/imagebuilder/go.mod | 18 + .../github.com/openshift/imagebuilder/go.sum | 1003 +++ .../openshift/imagebuilder/imagebuilder.spec | 63 + .../openshift/imagebuilder/internals.go | 120 + .../openshift/imagebuilder/shell_parser.go | 325 + .../openshift/imagebuilder/signal/README.md | 1 + .../openshift/imagebuilder/signal/signal.go | 25 + 
.../openshift/imagebuilder/signal/signals.go | 79 + .../imagebuilder/strslice/strslice.go | 30 + vendor/github.com/ostreedev/ostree-go/LICENSE | 17 + .../ostree-go/pkg/glibobject/gboolean.go | 60 + .../ostree-go/pkg/glibobject/gcancellable.go | 47 + .../ostree-go/pkg/glibobject/gerror.go | 71 + .../ostree-go/pkg/glibobject/gfile.go | 52 + .../ostree-go/pkg/glibobject/gfileinfo.go | 53 + .../ostree-go/pkg/glibobject/ghashtable.go | 50 + .../pkg/glibobject/ghashtableiter.go | 50 + .../ostree-go/pkg/glibobject/glibobject.go | 27 + .../ostree-go/pkg/glibobject/glibobject.go.h | 17 + .../ostree-go/pkg/glibobject/gobject.go | 79 + .../pkg/glibobject/goptioncontext.go | 51 + .../ostree-go/pkg/glibobject/gvariant.go | 97 + .../ostree-go/pkg/otbuiltin/builtin.go | 119 + .../ostree-go/pkg/otbuiltin/builtin.go.h | 179 + .../ostree-go/pkg/otbuiltin/checkout.go | 111 + .../ostree-go/pkg/otbuiltin/commit.go | 483 ++ .../ostreedev/ostree-go/pkg/otbuiltin/init.go | 84 + .../ostreedev/ostree-go/pkg/otbuiltin/log.go | 163 + .../ostree-go/pkg/otbuiltin/prune.go | 217 + vendor/github.com/pelletier/go-toml/LICENSE | 226 + vendor/github.com/pelletier/go-toml/README.md | 27 +- .../github.com/pelletier/go-toml/localtime.go | 10 +- .../github.com/pelletier/go-toml/marshal.go | 19 +- .../pelletier/go-toml/tomltree_write.go | 17 +- .../github.com/proglottis/gpgme/.appveyor.yml | 40 + vendor/github.com/proglottis/gpgme/.gitignore | 1 + .../github.com/proglottis/gpgme/.travis.yml | 32 + vendor/github.com/proglottis/gpgme/LICENSE | 12 + vendor/github.com/proglottis/gpgme/README.md | 13 + .../github.com/proglottis/gpgme/callbacks.go | 42 + vendor/github.com/proglottis/gpgme/data.go | 197 + vendor/github.com/proglottis/gpgme/go.mod | 3 + vendor/github.com/proglottis/gpgme/go_gpgme.c | 111 + vendor/github.com/proglottis/gpgme/go_gpgme.h | 44 + vendor/github.com/proglottis/gpgme/gpgme.go | 950 +++ .../proglottis/gpgme/unset_agent_info.go | 18 + .../gpgme/unset_agent_info_windows.go | 14 + .../prometheus/promhttp/instrument_client.go | 28 +- .../prometheus/promhttp/instrument_server.go | 111 +- .../prometheus/promhttp/option.go | 31 + vendor/github.com/rivo/uniseg/LICENSE.txt | 21 + vendor/github.com/rivo/uniseg/README.md | 62 + vendor/github.com/rivo/uniseg/doc.go | 8 + vendor/github.com/rivo/uniseg/go.mod | 3 + vendor/github.com/rivo/uniseg/grapheme.go | 268 + vendor/github.com/rivo/uniseg/properties.go | 1658 +++++ .../russross/blackfriday/v2/README.md | 90 +- .../russross/blackfriday/v2/block.go | 30 +- .../github.com/russross/blackfriday/v2/doc.go | 28 + .../russross/blackfriday/v2/entities.go | 2236 ++++++ .../github.com/russross/blackfriday/v2/esc.go | 42 +- .../russross/blackfriday/v2/html.go | 9 +- .../russross/blackfriday/v2/inline.go | 2 +- .../russross/blackfriday/v2/node.go | 12 +- .../seccomp/libseccomp-golang/.gitignore | 4 + .../seccomp/libseccomp-golang/.travis.yml | 57 + .../seccomp/libseccomp-golang/CHANGELOG | 17 + .../seccomp/libseccomp-golang/CONTRIBUTING.md | 128 + .../seccomp/libseccomp-golang/LICENSE | 22 + .../seccomp/libseccomp-golang/Makefile | 32 + .../seccomp/libseccomp-golang/README.md | 51 + .../seccomp/libseccomp-golang/go.mod | 3 + .../seccomp/libseccomp-golang/go.sum | 23 + .../seccomp/libseccomp-golang/seccomp.go | 1126 +++ .../libseccomp-golang/seccomp_internal.go | 854 +++ .../sergi/go-diff/diffmatchpatch/diff.go | 135 +- .../sergi/go-diff/diffmatchpatch/patch.go | 2 +- .../go-diff/diffmatchpatch/stringutil.go | 18 +
.../sanitized_anchor_name/.travis.yml | 16 - .../shurcooL/sanitized_anchor_name/README.md | 36 - .../shurcooL/sanitized_anchor_name/go.mod | 1 - .../shurcooL/sanitized_anchor_name/main.go | 29 - vendor/github.com/spf13/afero/.gitignore | 2 + vendor/github.com/spf13/afero/.travis.yml | 47 +- vendor/github.com/spf13/afero/README.md | 42 +- vendor/github.com/spf13/afero/afero.go | 5 +- vendor/github.com/spf13/afero/appveyor.yml | 4 +- vendor/github.com/spf13/afero/basepath.go | 33 +- .../github.com/spf13/afero/cacheOnReadFs.go | 21 + vendor/github.com/spf13/afero/const_bsds.go | 2 +- .../github.com/spf13/afero/const_win_unix.go | 1 + .../github.com/spf13/afero/copyOnWriteFs.go | 35 +- vendor/github.com/spf13/afero/go.mod | 8 +- vendor/github.com/spf13/afero/go.sum | 27 + vendor/github.com/spf13/afero/httpFs.go | 4 + vendor/github.com/spf13/afero/iofs.go | 288 + vendor/github.com/spf13/afero/ioutil.go | 32 +- vendor/github.com/spf13/afero/match.go | 2 +- vendor/github.com/spf13/afero/mem/file.go | 39 +- vendor/github.com/spf13/afero/memmap.go | 75 +- vendor/github.com/spf13/afero/os.go | 12 + vendor/github.com/spf13/afero/readonlyfs.go | 16 + vendor/github.com/spf13/afero/regexpfs.go | 10 + vendor/github.com/spf13/afero/symlink.go | 55 + vendor/github.com/spf13/afero/unionFile.go | 17 +- vendor/github.com/spf13/cast/.travis.yml | 16 - vendor/github.com/spf13/cast/README.md | 2 +- vendor/github.com/spf13/cast/cast.go | 5 + vendor/github.com/spf13/cast/caste.go | 148 +- .../spf13/cast/timeformattype_string.go | 27 + vendor/github.com/spf13/cobra/.travis.yml | 28 - vendor/github.com/spf13/cobra/MAINTAINERS | 13 + vendor/github.com/spf13/cobra/Makefile | 11 +- vendor/github.com/spf13/cobra/README.md | 681 +- vendor/github.com/spf13/cobra/args.go | 12 + .../spf13/cobra/bash_completions.go | 54 +- .../spf13/cobra/bash_completions.md | 2 + .../spf13/cobra/bash_completionsV2.go | 331 + vendor/github.com/spf13/cobra/command.go | 30 +- .../github.com/spf13/cobra/command_notwin.go | 1 + vendor/github.com/spf13/cobra/command_win.go | 1 + .../{custom_completions.go => completions.go} | 422 +- .../spf13/cobra/fish_completions.go | 178 +- vendor/github.com/spf13/cobra/go.mod | 6 +- vendor/github.com/spf13/cobra/go.sum | 311 +- .../spf13/cobra/powershell_completions.go | 38 +- .../spf13/cobra/projects_using_cobra.md | 13 + .../spf13/cobra/shell_completions.md | 95 +- vendor/github.com/spf13/cobra/user_guide.md | 638 ++ .../github.com/spf13/cobra/zsh_completions.go | 54 +- vendor/github.com/spf13/viper/.golangci.yml | 104 +- vendor/github.com/spf13/viper/Makefile | 6 +- vendor/github.com/spf13/viper/README.md | 208 +- .../github.com/spf13/viper/TROUBLESHOOTING.md | 23 + vendor/github.com/spf13/viper/fs.go | 65 + vendor/github.com/spf13/viper/go.mod | 95 +- vendor/github.com/spf13/viper/go.sum | 761 +- .../spf13/viper/internal/encoding/decoder.go | 61 + .../spf13/viper/internal/encoding/encoder.go | 60 + .../spf13/viper/internal/encoding/error.go | 7 + .../viper/internal/encoding/hcl/codec.go | 40 + .../viper/internal/encoding/json/codec.go | 17 + .../viper/internal/encoding/toml/codec.go | 45 + .../viper/internal/encoding/yaml/codec.go | 14 + vendor/github.com/spf13/viper/logger.go | 77 + vendor/github.com/spf13/viper/util.go | 29 +- vendor/github.com/spf13/viper/viper.go | 486 +- vendor/github.com/spf13/viper/viper_go1_15.go | 57 + 
vendor/github.com/spf13/viper/viper_go1_16.go | 32 + vendor/github.com/spf13/viper/watch.go | 12 + vendor/github.com/spf13/viper/watch_wasm.go | 30 + .../stefanberger/go-pkcs11uri/.gitignore | 2 + .../stefanberger/go-pkcs11uri/.travis.yml | 25 + .../stefanberger/go-pkcs11uri/LICENSE | 177 + .../stefanberger/go-pkcs11uri/Makefile | 28 + .../stefanberger/go-pkcs11uri/README.md | 102 + .../stefanberger/go-pkcs11uri/pkcs11uri.go | 453 ++ vendor/github.com/sylabs/sif/v2/LICENSE.md | 29 + .../github.com/sylabs/sif/v2/pkg/sif/arch.go | 69 + .../sylabs/sif/v2/pkg/sif/buffer.go | 103 + .../sylabs/sif/v2/pkg/sif/create.go | 680 ++ .../sylabs/sif/v2/pkg/sif/descriptor.go | 267 + .../sylabs/sif/v2/pkg/sif/descriptor_input.go | 300 + .../github.com/sylabs/sif/v2/pkg/sif/load.go | 174 + .../sylabs/sif/v2/pkg/sif/select.go | 210 + .../github.com/sylabs/sif/v2/pkg/sif/sif.go | 364 + vendor/github.com/syndtr/gocapability/LICENSE | 24 + .../gocapability/capability/capability.go | 133 + .../capability/capability_linux.go | 642 ++ .../capability/capability_noop.go | 19 + .../syndtr/gocapability/capability/enum.go | 309 + .../gocapability/capability/enum_gen.go | 138 + .../gocapability/capability/syscall_linux.go | 154 + vendor/github.com/tchap/go-patricia/AUTHORS | 3 + vendor/github.com/tchap/go-patricia/LICENSE | 20 + .../tchap/go-patricia/patricia/children.go | 363 + .../tchap/go-patricia/patricia/patricia.go | 606 ++ .../github.com/tonistiigi/fsutil/.gitignore | 9 - .../github.com/tonistiigi/fsutil/Dockerfile | 29 - .../tonistiigi/fsutil/chtimes_linux.go | 20 - .../tonistiigi/fsutil/chtimes_nolinux.go | 22 - .../github.com/tonistiigi/fsutil/copy/copy.go | 131 +- .../tonistiigi/fsutil/copy/copy_freebsd.go | 32 - .../tonistiigi/fsutil/copy/copy_linux.go | 5 +- .../tonistiigi/fsutil/copy/copy_unix.go | 9 +- .../tonistiigi/fsutil/copy/device_darwin.go | 20 - .../tonistiigi/fsutil/copy/device_freebsd.go | 20 - vendor/github.com/tonistiigi/fsutil/diff.go | 51 - .../tonistiigi/fsutil/diff_containerd.go | 253 - .../tonistiigi/fsutil/diskwriter.go | 351 - .../tonistiigi/fsutil/diskwriter_freebsd.go | 14 - .../tonistiigi/fsutil/diskwriter_unix.go | 52 - .../tonistiigi/fsutil/diskwriter_unixnobsd.go | 13 - .../tonistiigi/fsutil/diskwriter_windows.go | 18 - .../tonistiigi/fsutil/docker-bake.hcl | 67 - .../tonistiigi/fsutil/followlinks.go | 150 - vendor/github.com/tonistiigi/fsutil/fs.go | 120 - vendor/github.com/tonistiigi/fsutil/go.mod | 20 - vendor/github.com/tonistiigi/fsutil/go.sum | 230 - .../github.com/tonistiigi/fsutil/hardlinks.go | 48 - vendor/github.com/tonistiigi/fsutil/readme.md | 45 - .../github.com/tonistiigi/fsutil/receive.go | 286 - vendor/github.com/tonistiigi/fsutil/send.go | 208 - vendor/github.com/tonistiigi/fsutil/stat.go | 64 - .../github.com/tonistiigi/fsutil/stat_unix.go | 71 - .../tonistiigi/fsutil/stat_windows.go | 16 - .../github.com/tonistiigi/fsutil/tarwriter.go | 80 - .../tonistiigi/fsutil/types/generate.go | 3 - .../tonistiigi/fsutil/types/stat.go | 7 - .../tonistiigi/fsutil/types/stat.pb.go | 929 --- .../tonistiigi/fsutil/types/stat.proto | 19 - .../tonistiigi/fsutil/types/wire.pb.go | 575 -- .../tonistiigi/fsutil/types/wire.proto | 21 - .../github.com/tonistiigi/fsutil/validator.go | 93 - vendor/github.com/tonistiigi/fsutil/walker.go | 256 - vendor/github.com/ulikunitz/xz/.gitignore | 25 + vendor/github.com/ulikunitz/xz/LICENSE | 26 + vendor/github.com/ulikunitz/xz/README.md | 73 + 
vendor/github.com/ulikunitz/xz/SECURITY.md | 10 + vendor/github.com/ulikunitz/xz/TODO.md | 363 + vendor/github.com/ulikunitz/xz/bits.go | 79 + vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/format.go | 721 ++ .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes vendor/github.com/ulikunitz/xz/go.mod | 3 + .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 + .../ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + .../ulikunitz/xz/internal/hash/roller.go | 29 + .../ulikunitz/xz/internal/xlog/xlog.go | 457 ++ .../github.com/ulikunitz/xz/lzma/bintree.go | 522 ++ vendor/github.com/ulikunitz/xz/lzma/bitops.go | 47 + .../github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 + .../ulikunitz/xz/lzma/bytewriter.go | 37 + .../github.com/ulikunitz/xz/lzma/decoder.go | 277 + .../ulikunitz/xz/lzma/decoderdict.go | 128 + .../ulikunitz/xz/lzma/directcodec.go | 38 + .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 + .../github.com/ulikunitz/xz/lzma/encoder.go | 268 + .../ulikunitz/xz/lzma/encoderdict.go | 149 + vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 + vendor/github.com/ulikunitz/xz/lzma/header.go | 167 + .../github.com/ulikunitz/xz/lzma/header2.go | 398 ++ .../ulikunitz/xz/lzma/lengthcodec.go | 116 + .../ulikunitz/xz/lzma/literalcodec.go | 125 + .../ulikunitz/xz/lzma/matchalgorithm.go | 52 + .../github.com/ulikunitz/xz/lzma/operation.go | 55 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + .../ulikunitz/xz/lzma/properties.go | 69 + .../ulikunitz/xz/lzma/rangecodec.go | 222 + vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 + .../github.com/ulikunitz/xz/lzma/reader2.go | 231 + vendor/github.com/ulikunitz/xz/lzma/state.go | 145 + .../ulikunitz/xz/lzma/treecodecs.go | 133 + vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 + .../github.com/ulikunitz/xz/lzma/writer2.go | 305 + vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 + vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/none-check.go | 23 + vendor/github.com/ulikunitz/xz/reader.go | 359 + vendor/github.com/ulikunitz/xz/writer.go | 399 ++ .../vbatts/tar-split/tar/storage/getter.go | 3 +- .../github.com/vbauerster/mpb/v7/.gitignore | 5 + vendor/github.com/vbauerster/mpb/v7/README.md | 120 + vendor/github.com/vbauerster/mpb/v7/UNLICENSE | 24 + vendor/github.com/vbauerster/mpb/v7/bar.go | 569 ++ .../vbauerster/mpb/v7/bar_filler.go | 50 + .../vbauerster/mpb/v7/bar_filler_bar.go | 256 + .../vbauerster/mpb/v7/bar_filler_nop.go | 14 + .../vbauerster/mpb/v7/bar_filler_spinner.go | 91 + .../vbauerster/mpb/v7/bar_option.go | 167 + .../vbauerster/mpb/v7/container_option.go | 117 + .../vbauerster/mpb/v7/cwriter/doc.go | 2 + .../vbauerster/mpb/v7/cwriter/util_bsd.go | 7 + .../vbauerster/mpb/v7/cwriter/util_linux.go | 7 + .../vbauerster/mpb/v7/cwriter/util_solaris.go | 7 + .../vbauerster/mpb/v7/cwriter/util_zos.go | 7 + .../vbauerster/mpb/v7/cwriter/writer.go | 84 + .../vbauerster/mpb/v7/cwriter/writer_posix.go | 26 + .../mpb/v7/cwriter/writer_windows.go | 73 + .../github.com/vbauerster/mpb/v7/decor/any.go | 21 + .../vbauerster/mpb/v7/decor/counters.go | 243 + .../vbauerster/mpb/v7/decor/decorator.go | 192 + .../github.com/vbauerster/mpb/v7/decor/doc.go | 19 +
.../vbauerster/mpb/v7/decor/elapsed.go | 35 + .../github.com/vbauerster/mpb/v7/decor/eta.go | 203 + .../vbauerster/mpb/v7/decor/merge.go | 110 + .../vbauerster/mpb/v7/decor/moving_average.go | 68 + .../vbauerster/mpb/v7/decor/name.go | 12 + .../vbauerster/mpb/v7/decor/on_abort.go | 41 + .../vbauerster/mpb/v7/decor/on_complete.go | 40 + .../vbauerster/mpb/v7/decor/on_condition.go | 27 + .../mpb/v7/decor/optimistic_string_writer.go | 12 + .../vbauerster/mpb/v7/decor/percentage.go | 57 + .../vbauerster/mpb/v7/decor/size_type.go | 107 + .../mpb/v7/decor/sizeb1000_string.go | 41 + .../mpb/v7/decor/sizeb1024_string.go | 41 + .../vbauerster/mpb/v7/decor/speed.go | 170 + .../vbauerster/mpb/v7/decor/spinner.go | 21 + vendor/github.com/vbauerster/mpb/v7/doc.go | 2 + vendor/github.com/vbauerster/mpb/v7/go.mod | 10 + vendor/github.com/vbauerster/mpb/v7/go.sum | 10 + .../vbauerster/mpb/v7/internal/percentage.go | 19 + .../vbauerster/mpb/v7/internal/width.go | 10 + .../vbauerster/mpb/v7/priority_queue.go | 32 + .../github.com/vbauerster/mpb/v7/progress.go | 440 ++ .../vbauerster/mpb/v7/proxyreader.go | 86 + .../vishvananda/netlink/class_linux.go | 10 + .../vishvananda/netlink/devlink_linux.go | 121 + .../vishvananda/netlink/filter_linux.go | 6 + .../vishvananda/netlink/handle_linux.go | 16 + .../vishvananda/netlink/handle_unspecified.go | 4 + .../vishvananda/netlink/inet_diag.go | 1 + .../vishvananda/netlink/ipset_linux.go | 60 +- vendor/github.com/vishvananda/netlink/link.go | 195 +- .../vishvananda/netlink/link_linux.go | 131 +- .../vishvananda/netlink/nl/devlink_linux.go | 23 + .../vishvananda/netlink/nl/link_linux.go | 36 + .../vishvananda/netlink/nl/nl_linux.go | 11 + .../nl/{parse_attr.go => parse_attr_linux.go} | 14 +- .../github.com/vishvananda/netlink/qdisc.go | 16 +- .../vishvananda/netlink/qdisc_linux.go | 16 +- .../github.com/vishvananda/netlink/route.go | 14 +- .../vishvananda/netlink/route_linux.go | 218 +- .../vishvananda/netlink/route_unspecified.go | 10 + .../vishvananda/netlink/socket_linux.go | 47 +- vendor/github.com/vishvananda/netlink/tcp.go | 66 + .../vishvananda/netlink/tcp_linux.go | 76 +- .../vishvananda/netlink/xfrm_policy.go | 13 +- .../vishvananda/netlink/xfrm_policy_linux.go | 2 + .../vishvananda/netlink/xfrm_state.go | 4 +- .../vishvananda/netlink/xfrm_state_linux.go | 23 +- vendor/github.com/xanzy/ssh-agent/README.md | 2 +- vendor/github.com/xanzy/ssh-agent/go.mod | 7 +- vendor/github.com/xanzy/ssh-agent/go.sum | 30 +- .../xeipuuv/gojsonpointer/pointer.go | 8 +- vendor/go.etcd.io/bbolt/.gitignore | 7 + vendor/go.etcd.io/bbolt/.travis.yml | 18 + vendor/go.etcd.io/bbolt/LICENSE | 20 + vendor/go.etcd.io/bbolt/Makefile | 36 + vendor/go.etcd.io/bbolt/README.md | 958 +++ vendor/go.etcd.io/bbolt/bolt_386.go | 7 + vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 + vendor/go.etcd.io/bbolt/bolt_arm.go | 7 + vendor/go.etcd.io/bbolt/bolt_arm64.go | 9 + vendor/go.etcd.io/bbolt/bolt_linux.go | 10 + vendor/go.etcd.io/bbolt/bolt_mips64x.go | 9 + vendor/go.etcd.io/bbolt/bolt_mipsx.go | 9 + vendor/go.etcd.io/bbolt/bolt_openbsd.go | 27 + vendor/go.etcd.io/bbolt/bolt_ppc.go | 9 + vendor/go.etcd.io/bbolt/bolt_ppc64.go | 9 + vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 9 + vendor/go.etcd.io/bbolt/bolt_riscv64.go | 9 + vendor/go.etcd.io/bbolt/bolt_s390x.go | 9 + vendor/go.etcd.io/bbolt/bolt_unix.go | 86 + vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 + vendor/go.etcd.io/bbolt/bolt_unix_solaris.go | 88 + vendor/go.etcd.io/bbolt/bolt_windows.go | 141 + 
vendor/go.etcd.io/bbolt/boltsync_unix.go | 8 + vendor/go.etcd.io/bbolt/bucket.go | 777 +++ vendor/go.etcd.io/bbolt/compact.go | 114 + vendor/go.etcd.io/bbolt/cursor.go | 396 ++ vendor/go.etcd.io/bbolt/db.go | 1232 ++++ vendor/go.etcd.io/bbolt/doc.go | 44 + vendor/go.etcd.io/bbolt/errors.go | 71 + vendor/go.etcd.io/bbolt/freelist.go | 404 ++ vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 + vendor/go.etcd.io/bbolt/go.mod | 5 + vendor/go.etcd.io/bbolt/go.sum | 2 + vendor/go.etcd.io/bbolt/mlock_unix.go | 36 + vendor/go.etcd.io/bbolt/mlock_windows.go | 11 + vendor/go.etcd.io/bbolt/node.go | 602 ++ vendor/go.etcd.io/bbolt/page.go | 204 + vendor/go.etcd.io/bbolt/tx.go | 723 ++ vendor/go.etcd.io/bbolt/unsafe.go | 39 + .../go.etcd.io/etcd/api/v3/version/version.go | 2 +- vendor/go.mozilla.org/pkcs7/.gitignore | 24 + vendor/go.mozilla.org/pkcs7/.travis.yml | 10 + vendor/go.mozilla.org/pkcs7/LICENSE | 22 + vendor/go.mozilla.org/pkcs7/Makefile | 20 + vendor/go.mozilla.org/pkcs7/README.md | 69 + vendor/go.mozilla.org/pkcs7/ber.go | 251 + vendor/go.mozilla.org/pkcs7/decrypt.go | 177 + vendor/go.mozilla.org/pkcs7/encrypt.go | 399 ++ vendor/go.mozilla.org/pkcs7/go.mod | 3 + vendor/go.mozilla.org/pkcs7/pkcs7.go | 291 + vendor/go.mozilla.org/pkcs7/sign.go | 429 ++ vendor/go.mozilla.org/pkcs7/verify.go | 264 + vendor/go.opencensus.io/.gitignore | 9 + vendor/go.opencensus.io/AUTHORS | 1 + vendor/go.opencensus.io/CONTRIBUTING.md | 63 + vendor/go.opencensus.io/LICENSE | 202 + vendor/go.opencensus.io/Makefile | 97 + vendor/go.opencensus.io/README.md | 267 + vendor/go.opencensus.io/appveyor.yml | 24 + vendor/go.opencensus.io/go.mod | 12 + vendor/go.opencensus.io/go.sum | 116 + vendor/go.opencensus.io/internal/internal.go | 37 + vendor/go.opencensus.io/internal/sanitize.go | 50 + .../internal/traceinternals.go | 53 + vendor/go.opencensus.io/opencensus.go | 21 + vendor/go.opencensus.io/trace/basetypes.go | 129 + vendor/go.opencensus.io/trace/config.go | 86 + vendor/go.opencensus.io/trace/doc.go | 53 + vendor/go.opencensus.io/trace/evictedqueue.go | 38 + vendor/go.opencensus.io/trace/export.go | 97 + .../trace/internal/internal.go | 22 + vendor/go.opencensus.io/trace/lrumap.go | 61 + vendor/go.opencensus.io/trace/sampling.go | 75 + vendor/go.opencensus.io/trace/spanbucket.go | 130 + vendor/go.opencensus.io/trace/spanstore.go | 308 + vendor/go.opencensus.io/trace/status_codes.go | 37 + vendor/go.opencensus.io/trace/trace.go | 595 ++ vendor/go.opencensus.io/trace/trace_api.go | 265 + vendor/go.opencensus.io/trace/trace_go11.go | 32 + .../go.opencensus.io/trace/trace_nongo11.go | 25 + .../trace/tracestate/tracestate.go | 147 + .../x/crypto/chacha20/chacha_s390x.go | 1 + .../curve25519/internal/field/fe_amd64.go | 3 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 188 +- .../x/crypto/ed25519/ed25519_go113.go | 74 - .../ed25519/internal/edwards25519/const.go | 1422 ---- .../internal/edwards25519/edwards25519.go | 1793 ----- .../{ => internal}/poly1305/bits_compat.go | 0 .../{ => internal}/poly1305/bits_go1.13.go | 0 .../{ => internal}/poly1305/mac_noasm.go | 0 .../{ => internal}/poly1305/poly1305.go | 2 +- .../{ => internal}/poly1305/sum_amd64.go | 0 .../{ => internal}/poly1305/sum_amd64.s | 0 .../{ => internal}/poly1305/sum_generic.go | 5 +- .../{ => internal}/poly1305/sum_ppc64le.go | 0 .../{ => internal}/poly1305/sum_ppc64le.s | 0 .../{ => internal}/poly1305/sum_s390x.go | 1 + .../{ => internal}/poly1305/sum_s390x.s | 2 +- .../x/crypto/openpgp/armor/armor.go | 232 + .../x/crypto/openpgp/armor/encode.go | 161 + 
.../x/crypto/openpgp/canonical_text.go | 59 + .../x/crypto/openpgp/elgamal/elgamal.go | 130 + .../x/crypto/openpgp/errors/errors.go | 78 + vendor/golang.org/x/crypto/openpgp/keys.go | 693 ++ .../x/crypto/openpgp/packet/compressed.go | 123 + .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 208 + .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 + .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 162 + .../x/crypto/openpgp/packet/packet.go | 590 ++ .../x/crypto/openpgp/packet/private_key.go | 385 ++ .../x/crypto/openpgp/packet/public_key.go | 753 ++ .../x/crypto/openpgp/packet/public_key_v3.go | 279 + .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 ++ .../x/crypto/openpgp/packet/signature_v3.go | 146 + .../openpgp/packet/symmetric_key_encrypted.go | 155 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../x/crypto/openpgp/packet/userattribute.go | 91 + .../x/crypto/openpgp/packet/userid.go | 160 + vendor/golang.org/x/crypto/openpgp/read.go | 448 ++ vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 + vendor/golang.org/x/crypto/openpgp/write.go | 418 ++ vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- vendor/golang.org/x/crypto/scrypt/scrypt.go | 2 +- .../golang.org/x/crypto/ssh/agent/client.go | 27 +- .../golang.org/x/crypto/ssh/agent/keyring.go | 6 +- vendor/golang.org/x/crypto/ssh/certs.go | 85 +- vendor/golang.org/x/crypto/ssh/cipher.go | 12 +- vendor/golang.org/x/crypto/ssh/client.go | 18 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 132 +- vendor/golang.org/x/crypto/ssh/common.go | 76 +- vendor/golang.org/x/crypto/ssh/doc.go | 5 +- vendor/golang.org/x/crypto/ssh/handshake.go | 83 +- vendor/golang.org/x/crypto/ssh/kex.go | 186 +- vendor/golang.org/x/crypto/ssh/keys.go | 147 +- vendor/golang.org/x/crypto/ssh/messages.go | 21 +- vendor/golang.org/x/crypto/ssh/server.go | 46 +- vendor/golang.org/x/crypto/ssh/session.go | 1 + vendor/golang.org/x/crypto/ssh/transport.go | 10 +- vendor/golang.org/x/net/html/parse.go | 24 +- vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ascii.go | 4 + .../x/net/http2/client_conn_pool.go | 47 +- vendor/golang.org/x/net/http2/errors.go | 12 + vendor/golang.org/x/net/http2/frame.go | 62 +- vendor/golang.org/x/net/http2/go118.go | 17 + .../golang.org/x/net/http2/hpack/huffman.go | 38 +- vendor/golang.org/x/net/http2/not_go118.go | 17 + vendor/golang.org/x/net/http2/pipe.go | 11 + vendor/golang.org/x/net/http2/server.go | 134 +- vendor/golang.org/x/net/http2/transport.go | 1585 +++-- vendor/golang.org/x/net/http2/writesched.go | 4 +- .../x/net/http2/writesched_random.go | 6 +- vendor/golang.org/x/net/idna/go118.go | 14 + vendor/golang.org/x/net/idna/idna10.0.0.go | 6 +- vendor/golang.org/x/net/idna/idna9.0.0.go | 4 +- vendor/golang.org/x/net/idna/pre_go118.go | 12 + vendor/golang.org/x/net/idna/punycode.go | 36 +- vendor/golang.org/x/oauth2/README.md | 10 +- vendor/golang.org/x/oauth2/go.mod | 7 +- vendor/golang.org/x/oauth2/go.sum | 359 +- .../x/oauth2/internal/client_appengine.go | 1 + .../golang.org/x/sync/semaphore/semaphore.go | 136 + vendor/golang.org/x/sys/cpu/byteorder.go | 1 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 13 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 2 +- .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 + vendor/golang.org/x/sys/unix/endian_little.go | 4 +- vendor/golang.org/x/sys/unix/ioctl_linux.go | 23 + 
vendor/golang.org/x/sys/unix/mkerrors.sh | 7 + vendor/golang.org/x/sys/unix/syscall_aix.go | 16 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 35 +- .../golang.org/x/sys/unix/syscall_darwin.go | 45 +- .../x/sys/unix/syscall_dragonfly.go | 9 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 9 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 92 +- .../x/sys/unix/syscall_linux_386.go | 12 +- .../x/sys/unix/syscall_linux_alarm.go | 14 + .../x/sys/unix/syscall_linux_amd64.go | 6 +- .../x/sys/unix/syscall_linux_arm.go | 5 +- .../x/sys/unix/syscall_linux_arm64.go | 6 +- .../x/sys/unix/syscall_linux_loong64.go | 191 + .../x/sys/unix/syscall_linux_mips64x.go | 5 +- .../x/sys/unix/syscall_linux_mipsx.go | 5 +- .../x/sys/unix/syscall_linux_ppc.go | 5 +- .../x/sys/unix/syscall_linux_ppc64x.go | 5 +- .../x/sys/unix/syscall_linux_riscv64.go | 5 +- .../x/sys/unix/syscall_linux_s390x.go | 13 +- .../x/sys/unix/syscall_linux_sparc64.go | 5 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 9 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 9 +- .../golang.org/x/sys/unix/syscall_solaris.go | 148 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 51 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 53 +- .../x/sys/unix/zerrors_linux_loong64.go | 818 +++ .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 4 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 17 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 8 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 17 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 8 +- .../x/sys/unix/zsyscall_freebsd_386.go | 4 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 4 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 4 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 4 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 119 + .../x/sys/unix/zsyscall_linux_386.go | 17 +- .../x/sys/unix/zsyscall_linux_amd64.go | 39 +- .../x/sys/unix/zsyscall_linux_arm.go | 15 +- .../x/sys/unix/zsyscall_linux_arm64.go | 26 +- .../x/sys/unix/zsyscall_linux_loong64.go | 552 ++ .../x/sys/unix/zsyscall_linux_mips.go | 28 +- .../x/sys/unix/zsyscall_linux_mips64.go | 28 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 15 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 28 +- .../x/sys/unix/zsyscall_linux_ppc.go | 28 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 28 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 28 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 15 +- .../x/sys/unix/zsyscall_linux_s390x.go | 17 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 28 +- .../x/sys/unix/zsyscall_netbsd_386.go | 4 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 4 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 4 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 4 +- .../x/sys/unix/zsyscall_openbsd_386.go | 4 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 4 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 4 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 4 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 4 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 16 +- .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 313 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + 
.../x/sys/unix/zsysnum_linux_s390x.go | 1 +
.../x/sys/unix/zsysnum_linux_sparc64.go | 1 +
vendor/golang.org/x/sys/unix/ztypes_linux.go | 1525 +++-
.../golang.org/x/sys/unix/ztypes_linux_386.go | 13 +
.../x/sys/unix/ztypes_linux_amd64.go | 14 +
.../golang.org/x/sys/unix/ztypes_linux_arm.go | 13 +
.../x/sys/unix/ztypes_linux_arm64.go | 14 +
.../x/sys/unix/ztypes_linux_loong64.go | 679 ++
.../x/sys/unix/ztypes_linux_mips.go | 13 +
.../x/sys/unix/ztypes_linux_mips64.go | 14 +
.../x/sys/unix/ztypes_linux_mips64le.go | 14 +
.../x/sys/unix/ztypes_linux_mipsle.go | 13 +
.../golang.org/x/sys/unix/ztypes_linux_ppc.go | 13 +
.../x/sys/unix/ztypes_linux_ppc64.go | 14 +
.../x/sys/unix/ztypes_linux_ppc64le.go | 14 +
.../x/sys/unix/ztypes_linux_riscv64.go | 14 +
.../x/sys/unix/ztypes_linux_s390x.go | 18 +-
.../x/sys/unix/ztypes_linux_sparc64.go | 14 +
.../x/sys/windows/syscall_windows.go | 42 +-
.../x/sys/windows/zsyscall_windows.go | 4 +-
vendor/golang.org/x/term/codereview.cfg | 1 +
vendor/golang.org/x/term/go.mod | 4 +-
vendor/golang.org/x/term/go.sum | 4 +-
vendor/golang.org/x/term/term.go | 6 +-
vendor/golang.org/x/term/term_solaris.go | 111 -
vendor/golang.org/x/term/term_unix.go | 4 +-
vendor/golang.org/x/term/term_unix_linux.go | 10 -
.../{term_unix_aix.go => term_unix_other.go} | 5 +-
vendor/golang.org/x/term/term_unix_zos.go | 10 -
.../x/text/internal/language/language.go | 43 +-
.../x/text/internal/language/parse.go | 7 +
vendor/golang.org/x/text/language/parse.go | 22 +
.../api/annotations/annotations.pb.go | 7 +-
.../googleapis/api/annotations/client.pb.go | 7 +-
.../api/annotations/field_behavior.pb.go | 46 +-
.../googleapis/api/annotations/http.pb.go | 7 +-
.../googleapis/api/annotations/resource.pb.go | 7 +-
.../googleapis/api/annotations/routing.pb.go | 693 ++
.../googleapis/rpc/status/status.pb.go | 9 +-
vendor/google.golang.org/grpc/.travis.yml | 42 -
vendor/google.golang.org/grpc/MAINTAINERS.md | 5 +-
vendor/google.golang.org/grpc/Makefile | 2 -
vendor/google.golang.org/grpc/NOTICE.txt | 13 +
vendor/google.golang.org/grpc/README.md | 2 +-
.../grpc/attributes/attributes.go | 78 +-
.../grpc/balancer/balancer.go | 68 +-
.../grpc/balancer/base/balancer.go | 80 +-
.../grpc/balancer/grpclb/state/state.go | 2 +-
.../grpc/balancer/roundrobin/roundrobin.go | 4 +-
.../grpc/balancer_conn_wrappers.go | 112 +-
vendor/google.golang.org/grpc/clientconn.go | 518 +-
.../grpc/connectivity/connectivity.go | 35 +-
.../grpc/credentials/credentials.go | 15 +-
.../google.golang.org/grpc/credentials/tls.go | 3 +
vendor/google.golang.org/grpc/dialoptions.go | 27 +-
vendor/google.golang.org/grpc/go.mod | 16 +-
vendor/google.golang.org/grpc/go.sum | 55 +-
.../grpc/grpclog/loggerv2.go | 86 +-
vendor/google.golang.org/grpc/install_gae.sh | 6 -
.../grpc/internal/binarylog/sink.go | 41 +-
.../grpc/internal/channelz/funcs.go | 2 +-
.../grpc/internal/channelz/types_linux.go | 2 -
.../grpc/internal/channelz/types_nonlinux.go | 5 +-
.../grpc/internal/channelz/util_linux.go | 2 -
.../grpc/internal/channelz/util_nonlinux.go | 3 +-
.../grpc/internal/credentials/spiffe.go | 2 -
.../internal/credentials/spiffe_appengine.go | 31 -
.../grpc/internal/credentials/syscallconn.go | 2 -
.../grpc/internal/credentials/util.go | 4 +-
.../grpc/internal/envconfig/envconfig.go | 7 +-
.../grpc/internal/grpcrand/grpcrand.go | 29 +-
.../grpcutil.go} | 16 +-
.../dns/go113.go => grpcutil/regex.go} | 21 +-
.../grpc/internal/grpcutil/target.go | 89 -
.../grpc/internal/metadata/metadata.go | 30 +-
.../grpc/internal/resolver/config_selector.go | 9 +-
.../internal/resolver/dns/dns_resolver.go | 9 +-
.../grpc/internal/resolver/unix/unix.go | 12 +-
.../internal/serviceconfig/serviceconfig.go | 4 +-
.../grpc/internal/status/status.go | 14 +-
.../grpc/internal/syscall/syscall_linux.go | 2 -
.../grpc/internal/syscall/syscall_nonlinux.go | 21 +-
.../grpc/internal/transport/controlbuf.go | 32 +-
.../grpc/internal/transport/handler_server.go | 3 +-
.../grpc/internal/transport/http2_client.go | 271 +-
.../grpc/internal/transport/http2_server.go | 242 +-
.../grpc/internal/transport/http_util.go | 224 +-
.../transport/networktype/networktype.go | 4 +-
.../grpc/internal/transport/transport.go | 12 +-
.../grpc/internal/xds/env/env.go | 97 +
.../grpc/internal/xds_handshake_cluster.go | 2 +-
.../grpc/metadata/metadata.go | 76 +-
.../google.golang.org/grpc/picker_wrapper.go | 4 +-
vendor/google.golang.org/grpc/pickfirst.go | 21 +-
.../grpc/resolver/manual/manual.go | 20 +-
vendor/google.golang.org/grpc/resolver/map.go | 109 +
.../grpc/resolver/resolver.go | 56 +-
.../grpc/resolver_conn_wrapper.go | 10 +
vendor/google.golang.org/grpc/rpc_util.go | 31 +-
vendor/google.golang.org/grpc/server.go | 113 +-
vendor/google.golang.org/grpc/stats/stats.go | 11 +-
.../google.golang.org/grpc/status/status.go | 15 +-
vendor/google.golang.org/grpc/stream.go | 172 +-
vendor/google.golang.org/grpc/version.go | 2 +-
vendor/google.golang.org/grpc/vet.sh | 32 +-
vendor/gopkg.in/ini.v1/.golangci.yml | 21 +
vendor/gopkg.in/ini.v1/README.md | 2 +-
vendor/gopkg.in/ini.v1/codecov.yml | 2 +-
vendor/gopkg.in/ini.v1/file.go | 29 +-
vendor/gopkg.in/ini.v1/ini.go | 14 +-
vendor/gopkg.in/ini.v1/key.go | 17 +-
vendor/gopkg.in/ini.v1/parser.go | 42 +-
vendor/gopkg.in/ini.v1/section.go | 12 +-
vendor/gopkg.in/ini.v1/struct.go | 71 +-
.../square/go-jose.v2/.gitcookies.sh.enc | 1 +
vendor/gopkg.in/square/go-jose.v2/.gitignore | 8 +
vendor/gopkg.in/square/go-jose.v2/.travis.yml | 45 +
.../gopkg.in/square/go-jose.v2/BUG-BOUNTY.md | 10 +
.../square/go-jose.v2/CONTRIBUTING.md | 14 +
vendor/gopkg.in/square/go-jose.v2/LICENSE | 202 +
vendor/gopkg.in/square/go-jose.v2/README.md | 118 +
.../gopkg.in/square/go-jose.v2/asymmetric.go | 592 ++
.../square/go-jose.v2/cipher/cbc_hmac.go | 196 +
.../square/go-jose.v2/cipher/concat_kdf.go | 75 +
.../square/go-jose.v2/cipher/ecdh_es.go | 86 +
.../square/go-jose.v2/cipher/key_wrap.go | 109 +
vendor/gopkg.in/square/go-jose.v2/crypter.go | 541 ++
.../square/go-jose.v2/doc.go} | 25 +-
vendor/gopkg.in/square/go-jose.v2/encoding.go | 185 +
.../gopkg.in/square/go-jose.v2/json/LICENSE | 27 +
.../gopkg.in/square/go-jose.v2/json/README.md | 13 +
.../gopkg.in/square/go-jose.v2/json/decode.go | 1183 ++++
.../gopkg.in/square/go-jose.v2/json/encode.go | 1197 ++++
.../gopkg.in/square/go-jose.v2/json/indent.go | 141 +
.../square/go-jose.v2/json/scanner.go | 623 ++
.../gopkg.in/square/go-jose.v2/json/stream.go | 480 ++
.../gopkg.in/square/go-jose.v2/json/tags.go | 44 +
vendor/gopkg.in/square/go-jose.v2/jwe.go | 294 +
vendor/gopkg.in/square/go-jose.v2/jwk.go | 760 ++
vendor/gopkg.in/square/go-jose.v2/jws.go | 366 +
vendor/gopkg.in/square/go-jose.v2/opaque.go | 144 +
vendor/gopkg.in/square/go-jose.v2/shared.go | 520 ++
vendor/gopkg.in/square/go-jose.v2/signing.go | 441 ++
.../gopkg.in/square/go-jose.v2/symmetric.go | 482 ++
.../third_party/forked/golang/LICENSE | 27 +
.../third_party/forked/golang/PATENTS | 22 +
.../third_party/forked/golang/net/ip.go | 236 +
.../third_party/forked/golang/net/parse.go | 59 +
vendor/k8s.io/utils/net/ipnet.go | 4 +-
vendor/k8s.io/utils/net/net.go | 12 +-
vendor/k8s.io/utils/net/parse.go | 33 +
vendor/k8s.io/utils/net/port.go | 2 +-
vendor/k8s.io/utils/pointer/pointer.go | 174 +-
vendor/k8s.io/utils/trace/trace.go | 2 +-
vendor/modules.txt | 481 +-
version/base.go | 2 +-
2605 files changed, 341642 insertions(+), 17927 deletions(-)
create mode 100644 docs/design/build_implementation.md
create mode 100644 pkg/auth/auth.go
create mode 100644 pkg/imageengine/buildah/build.go
create mode 100644 pkg/imageengine/buildah/build_rootfs.go
create mode 100644 pkg/imageengine/buildah/commit.go
create mode 100644 pkg/imageengine/buildah/common.go
create mode 100644 pkg/imageengine/buildah/copy.go
create mode 100644 pkg/imageengine/buildah/engine.go
create mode 100644 pkg/imageengine/buildah/from.go
create mode 100644 pkg/imageengine/buildah/get_annotation.go
create mode 100644 pkg/imageengine/buildah/images.go
create mode 100644 pkg/imageengine/buildah/init.go
create mode 100644 pkg/imageengine/buildah/inspect.go
create mode 100644 pkg/imageengine/buildah/load.go
create mode 100644 pkg/imageengine/buildah/login.go
create mode 100644 pkg/imageengine/buildah/logout.go
create mode 100644 pkg/imageengine/buildah/manifest.go
create mode 100644 pkg/imageengine/buildah/mount.go
create mode 100644 pkg/imageengine/buildah/pull.go
create mode 100644 pkg/imageengine/buildah/push.go
create mode 100644 pkg/imageengine/buildah/rmi.go
create mode 100644 pkg/imageengine/buildah/save.go
create mode 100644 pkg/imageengine/buildah/sealer_image_extension.go
create mode 100644 pkg/imageengine/buildah/util.go
create mode 100644 pkg/imageengine/common.go
create mode 100644 pkg/imageengine/common/options.go
create mode 100644 pkg/imageengine/interface.go
create mode 100644 types/api/v1/sealer_image_extension.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/go.mod
create mode 100644 vendor/github.com/Azure/go-ansiterm/go.sum
delete mode 100644 vendor/github.com/BurntSushi/toml/.travis.yml
delete mode 100644 vendor/github.com/BurntSushi/toml/Makefile
create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go
create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go
delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go
delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
create mode 100644 vendor/github.com/BurntSushi/toml/error.go
create mode 100644 vendor/github.com/BurntSushi/toml/go.mod
create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go
rename vendor/github.com/BurntSushi/toml/{decode_meta.go => meta.go} (64%)
delete mode 100644 vendor/github.com/BurntSushi/toml/session.vim
rename vendor/github.com/BurntSushi/toml/{type_check.go => type_toml.go} (75%)
create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/noop.go
create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/tar.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/vhd.go
create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/.gitattributes
create mode 100644 vendor/github.com/Microsoft/hcsshim/.gitignore
create mode 100644 vendor/github.com/Microsoft/hcsshim/.golangci.yml
create mode 100644 vendor/github.com/Microsoft/hcsshim/CODEOWNERS
create mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE
create mode 100644 vendor/github.com/Microsoft/hcsshim/Makefile
create mode 100644 vendor/github.com/Microsoft/hcsshim/Protobuild.toml
create mode 100644 vendor/github.com/Microsoft/hcsshim/README.md
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/export.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/format.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/import.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/container.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/errors.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
create mode 100644 vendor/github.com/Microsoft/hcsshim/go.mod
create mode 100644 vendor/github.com/Microsoft/hcsshim/go.sum
create mode 100644 vendor/github.com/Microsoft/hcsshim/hcsshim.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsglobals.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicy.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/hnssupport.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/interface.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/g.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/layer.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/process.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go
create mode 100644 vendor/github.com/VividCortex/ewma/.gitignore
create mode 100644 vendor/github.com/VividCortex/ewma/.whitesource
create mode 100644 vendor/github.com/VividCortex/ewma/LICENSE
create mode 100644 vendor/github.com/VividCortex/ewma/README.md
create mode 100644 vendor/github.com/VividCortex/ewma/codecov.yml
create mode 100644 vendor/github.com/VividCortex/ewma/ewma.go
create mode 100644 vendor/github.com/VividCortex/ewma/go.mod
rename vendor/github.com/{shurcooL/sanitized_anchor_name => acarl005/stripansi}/LICENSE (96%)
create mode 100644 vendor/github.com/acarl005/stripansi/README.md
create mode 100644 vendor/github.com/acarl005/stripansi/stripansi.go
create mode 100644 vendor/github.com/blang/semver/.travis.yml
create mode 100644 vendor/github.com/blang/semver/LICENSE
create mode 100644 vendor/github.com/blang/semver/README.md
create mode 100644 vendor/github.com/blang/semver/json.go
create mode 100644 vendor/github.com/blang/semver/package.json
create mode 100644 vendor/github.com/blang/semver/range.go
create mode 100644 vendor/github.com/blang/semver/semver.go
create mode 100644 vendor/github.com/blang/semver/sort.go
create mode 100644 vendor/github.com/blang/semver/sql.go
delete mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml
create mode 100644 vendor/github.com/chzyer/readline/.gitignore
create mode 100644 vendor/github.com/chzyer/readline/.travis.yml
create mode 100644 vendor/github.com/chzyer/readline/CHANGELOG.md
create mode 100644 vendor/github.com/chzyer/readline/LICENSE
create mode 100644 vendor/github.com/chzyer/readline/README.md
create mode 100644 vendor/github.com/chzyer/readline/ansi_windows.go
create mode 100644 vendor/github.com/chzyer/readline/complete.go
create mode 100644 vendor/github.com/chzyer/readline/complete_helper.go
create mode 100644 vendor/github.com/chzyer/readline/complete_segment.go
create mode 100644 vendor/github.com/chzyer/readline/history.go
create mode 100644 vendor/github.com/chzyer/readline/operation.go
create mode 100644 vendor/github.com/chzyer/readline/password.go
create mode 100644 vendor/github.com/chzyer/readline/rawreader_windows.go
create mode 100644 vendor/github.com/chzyer/readline/readline.go
create mode 100644 vendor/github.com/chzyer/readline/remote.go
create mode 100644 vendor/github.com/chzyer/readline/runebuf.go
create mode 100644 vendor/github.com/chzyer/readline/runes.go
create mode 100644 vendor/github.com/chzyer/readline/search.go
create mode 100644 vendor/github.com/chzyer/readline/std.go
create mode 100644 vendor/github.com/chzyer/readline/std_windows.go
create mode 100644 vendor/github.com/chzyer/readline/term.go
create mode 100644 vendor/github.com/chzyer/readline/term_bsd.go
create mode 100644 vendor/github.com/chzyer/readline/term_linux.go
create mode 100644 vendor/github.com/chzyer/readline/term_solaris.go
create mode 100644 vendor/github.com/chzyer/readline/term_unix.go
create mode 100644 vendor/github.com/chzyer/readline/term_windows.go
create mode 100644 vendor/github.com/chzyer/readline/terminal.go
create mode 100644 vendor/github.com/chzyer/readline/utils.go
create mode 100644 vendor/github.com/chzyer/readline/utils_unix.go
create mode 100644 vendor/github.com/chzyer/readline/utils_windows.go
create mode 100644 vendor/github.com/chzyer/readline/vim.go
create mode 100644 vendor/github.com/chzyer/readline/windows_api.go
create mode 100644 vendor/github.com/containerd/cgroups/LICENSE
create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/doc.go
create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go
create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt
create mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.proto
create mode 100644 vendor/github.com/containerd/containerd/log/context.go
create mode 100644 vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go
create mode 100644 vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/database.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go
create mode 100644 vendor/github.com/containerd/containerd/sys/epoll.go
create mode 100644 vendor/github.com/containerd/containerd/sys/fds.go
create mode 100644 vendor/github.com/containerd/containerd/sys/filesys_unix.go
create mode 100644 vendor/github.com/containerd/containerd/sys/filesys_windows.go
create mode 100644 vendor/github.com/containerd/containerd/sys/mount_linux.go
create mode 100644 vendor/github.com/containerd/containerd/sys/oom_linux.go
create mode 100644 vendor/github.com/containerd/containerd/sys/oom_unsupported.go
create mode 100644 vendor/github.com/containerd/containerd/sys/socket_unix.go
create mode 100644 vendor/github.com/containerd/containerd/sys/socket_windows.go
create mode 100644 vendor/github.com/containerd/containerd/sys/stat_bsd.go
create mode 100644 vendor/github.com/containerd/containerd/sys/stat_openbsd.go
create mode 100644 vendor/github.com/containerd/containerd/sys/stat_unix.go
create mode 100644 vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go
create mode 100644 vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s
create mode 100644 vendor/github.com/containerd/containerd/sys/userns_deprecated.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
create mode 100644 vendor/github.com/containernetworking/cni/LICENSE
create mode 100644 vendor/github.com/containernetworking/cni/libcni/api.go
create mode 100644 vendor/github.com/containernetworking/cni/libcni/conf.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/args.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/find.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/020/types.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/040/types.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/100/types.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/args.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/create/create.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/internal/create.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/types.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/utils/utils.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/conf.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/plugin.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/reconcile.go
create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/version.go
create mode 100644 vendor/github.com/containernetworking/plugins/LICENSE
create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ns/README.md
create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
create mode 100644 vendor/github.com/containers/buildah/.cirrus.yml
create mode 100644 vendor/github.com/containers/buildah/.gitignore
create mode 100644 vendor/github.com/containers/buildah/.golangci.yml
create mode 100644 vendor/github.com/containers/buildah/CHANGELOG.md
create mode 100644 vendor/github.com/containers/buildah/CODE-OF-CONDUCT.md
create mode 100644 vendor/github.com/containers/buildah/CONTRIBUTING.md
create mode 100644 vendor/github.com/containers/buildah/LICENSE
create mode 100644 vendor/github.com/containers/buildah/MAINTAINERS
create mode 100644 vendor/github.com/containers/buildah/Makefile
create mode 100644 vendor/github.com/containers/buildah/OWNERS
create mode 100644 vendor/github.com/containers/buildah/README.md
create mode 100644 vendor/github.com/containers/buildah/SECURITY.md
create mode 100644 vendor/github.com/containers/buildah/add.go
create mode 100644 vendor/github.com/containers/buildah/bind/mount.go
create mode 100644 vendor/github.com/containers/buildah/bind/mount_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/bind/util.go
create mode 100644 vendor/github.com/containers/buildah/btrfs_installed_tag.sh
create mode 100644 vendor/github.com/containers/buildah/btrfs_tag.sh
create mode 100644 vendor/github.com/containers/buildah/buildah.go
create mode 100644 vendor/github.com/containers/buildah/changelog.txt
create mode 100644 vendor/github.com/containers/buildah/chroot/run.go
create mode 100644 vendor/github.com/containers/buildah/chroot/seccomp.go
create mode 100644 vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/chroot/selinux.go
create mode 100644 vendor/github.com/containers/buildah/chroot/selinux_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/chroot/unsupported.go
create mode 100644 vendor/github.com/containers/buildah/commit.go
create mode 100644 vendor/github.com/containers/buildah/common.go
create mode 100644 vendor/github.com/containers/buildah/config.go
create mode 100644 vendor/github.com/containers/buildah/copier/copier.go
create mode 100644 vendor/github.com/containers/buildah/copier/syscall_unix.go
create mode 100644 vendor/github.com/containers/buildah/copier/syscall_windows.go
create mode 100644 vendor/github.com/containers/buildah/copier/unwrap_112.go
create mode 100644 vendor/github.com/containers/buildah/copier/unwrap_113.go
create mode 100644 vendor/github.com/containers/buildah/copier/xattrs.go
create mode 100644 vendor/github.com/containers/buildah/copier/xattrs_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/define/build.go
create mode 100644 vendor/github.com/containers/buildah/define/isolation.go
create mode 100644 vendor/github.com/containers/buildah/define/namespace.go
create mode 100644 vendor/github.com/containers/buildah/define/pull.go
create mode 100644 vendor/github.com/containers/buildah/define/types.go
create mode 100644 vendor/github.com/containers/buildah/define/types_unix.go
create mode 100644 vendor/github.com/containers/buildah/define/types_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/delete.go
create mode 100644 vendor/github.com/containers/buildah/developmentplan.md
create mode 100644 vendor/github.com/containers/buildah/digester.go
create mode 100644 vendor/github.com/containers/buildah/docker/AUTHORS
create mode 100644 vendor/github.com/containers/buildah/docker/types.go
create mode 100644 vendor/github.com/containers/buildah/go.mod
create mode 100644 vendor/github.com/containers/buildah/go.sum
create mode 100644 vendor/github.com/containers/buildah/image.go
create mode 100644 vendor/github.com/containers/buildah/imagebuildah/build.go
create mode 100644 vendor/github.com/containers/buildah/imagebuildah/executor.go
create mode 100644 vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
create mode 100644 vendor/github.com/containers/buildah/imagebuildah/util.go
create mode 100644 vendor/github.com/containers/buildah/import.go
create mode 100644 vendor/github.com/containers/buildah/info.go
create mode 100644 vendor/github.com/containers/buildah/install.md
create mode 100644 vendor/github.com/containers/buildah/internal/parse/parse.go
create mode 100644 vendor/github.com/containers/buildah/internal/types.go
create mode 100644 vendor/github.com/containers/buildah/internal/util/util.go
create mode 100644 vendor/github.com/containers/buildah/libdm_tag.sh
create mode 100644 vendor/github.com/containers/buildah/mount.go
create mode 100644 vendor/github.com/containers/buildah/new.go
create mode 100644 vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
create mode 100644 vendor/github.com/containers/buildah/pkg/chrootuser/user.go
create mode 100644 vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
create mode 100644 vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
create mode 100644 vendor/github.com/containers/buildah/pkg/cli/common.go
create mode 100644 vendor/github.com/containers/buildah/pkg/cli/exec_codes.go
create mode 100644 vendor/github.com/containers/buildah/pkg/completion/completion.go
create mode 100644 vendor/github.com/containers/buildah/pkg/formats/formats.go
create mode 100644 vendor/github.com/containers/buildah/pkg/formats/templates.go
create mode 100644 vendor/github.com/containers/buildah/pkg/overlay/overlay.go
create mode 100644 vendor/github.com/containers/buildah/pkg/parse/parse.go
create mode 100644 vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
create mode 100644 vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/pkg/rusage/rusage.go
create mode 100644 vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
create mode 100644 vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go
create mode 100644 vendor/github.com/containers/buildah/pkg/util/util.go
create mode 100644 vendor/github.com/containers/buildah/pull.go
create mode 100644 vendor/github.com/containers/buildah/push.go
create mode 100644 vendor/github.com/containers/buildah/release.sh
create mode 100644 vendor/github.com/containers/buildah/run.go
create mode 100644 vendor/github.com/containers/buildah/run_linux.go
create mode 100644 vendor/github.com/containers/buildah/run_unix.go
create mode 100644 vendor/github.com/containers/buildah/run_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/seccomp.go
create mode 100644 vendor/github.com/containers/buildah/seccomp_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/selinux.go
create mode 100644 vendor/github.com/containers/buildah/selinux_tag.sh
create mode 100644 vendor/github.com/containers/buildah/selinux_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/troubleshooting.md
create mode 100644 vendor/github.com/containers/buildah/unmount.go
create mode 100644 vendor/github.com/containers/buildah/util.go
create mode 100644 vendor/github.com/containers/buildah/util/types.go
create mode 100644 vendor/github.com/containers/buildah/util/util.go
create mode 100644 vendor/github.com/containers/buildah/util/util_linux.go
create mode 100644 vendor/github.com/containers/buildah/util/util_not_uint64.go
create mode 100644 vendor/github.com/containers/buildah/util/util_uint64.go
create mode 100644 vendor/github.com/containers/buildah/util/util_unix.go
create mode 100644 vendor/github.com/containers/buildah/util/util_unsupported.go
create mode 100644 vendor/github.com/containers/buildah/util/util_windows.go
create mode 100644 vendor/github.com/containers/common/LICENSE
create mode 100644 vendor/github.com/containers/common/libimage/copier.go
create mode 100644 vendor/github.com/containers/common/libimage/disk_usage.go
create mode 100644 vendor/github.com/containers/common/libimage/events.go
create mode 100644 vendor/github.com/containers/common/libimage/filters.go
create mode 100644 vendor/github.com/containers/common/libimage/history.go
create mode 100644 vendor/github.com/containers/common/libimage/image.go
create mode 100644 vendor/github.com/containers/common/libimage/image_config.go
create mode 100644 vendor/github.com/containers/common/libimage/image_tree.go
create mode 100644 vendor/github.com/containers/common/libimage/import.go
create mode 100644 vendor/github.com/containers/common/libimage/inspect.go
create mode 100644 vendor/github.com/containers/common/libimage/layer_tree.go
create mode 100644 vendor/github.com/containers/common/libimage/load.go
create mode 100644 vendor/github.com/containers/common/libimage/manifest_list.go
create mode 100644 vendor/github.com/containers/common/libimage/manifests/copy.go
create mode 100644 vendor/github.com/containers/common/libimage/manifests/manifests.go
create mode 100644 vendor/github.com/containers/common/libimage/normalize.go
create mode 100644 vendor/github.com/containers/common/libimage/oci.go
create mode 100644 vendor/github.com/containers/common/libimage/pull.go
create mode 100644 vendor/github.com/containers/common/libimage/push.go
create mode 100644 vendor/github.com/containers/common/libimage/runtime.go
create mode 100644 vendor/github.com/containers/common/libimage/save.go
create mode 100644 vendor/github.com/containers/common/libimage/search.go
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/README.md
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/cni_types.go
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/config.go
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/network.go
create mode 100644 vendor/github.com/containers/common/libnetwork/cni/run.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/create.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/interface.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/ip.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/parse.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/util.go
create mode 100644 vendor/github.com/containers/common/libnetwork/internal/util/validate.go
create mode 100644 vendor/github.com/containers/common/libnetwork/netavark/config.go
create mode 100644 vendor/github.com/containers/common/libnetwork/netavark/const.go
create mode 100644 vendor/github.com/containers/common/libnetwork/netavark/exec.go
create mode 100644 vendor/github.com/containers/common/libnetwork/netavark/ipam.go
create mode 100644 vendor/github.com/containers/common/libnetwork/netavark/network.go
create mode 100644 vendor/github.com/containers/common/libnetwork/netavark/run.go
create mode 100644 vendor/github.com/containers/common/libnetwork/network/interface.go
create mode 100644 vendor/github.com/containers/common/libnetwork/types/const.go
create mode 100644 vendor/github.com/containers/common/libnetwork/types/define.go
create mode 100644 vendor/github.com/containers/common/libnetwork/types/network.go
create mode 100644 vendor/github.com/containers/common/libnetwork/util/filters.go
create mode 100644 vendor/github.com/containers/common/libnetwork/util/ip.go
create mode 100644 vendor/github.com/containers/common/libnetwork/util/ip_calc.go
create mode 100644 vendor/github.com/containers/common/pkg/apparmor/apparmor.go
create mode 100644 vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
create mode 100644 vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
create mode 100644 vendor/github.com/containers/common/pkg/auth/auth.go
create mode 100644 vendor/github.com/containers/common/pkg/auth/cli.go
create mode 100644 vendor/github.com/containers/common/pkg/capabilities/capabilities.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/blkio.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/cgroups.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/cpu.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/cpuset.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/memory.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/pids.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroups/systemd.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/chown/chown.go
create mode 100644 vendor/github.com/containers/common/pkg/chown/chown_unix.go
create mode 100644 vendor/github.com/containers/common/pkg/chown/chown_windows.go
create mode 100644 vendor/github.com/containers/common/pkg/completion/completion.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config_darwin.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config_local.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config_remote.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/config/config_windows.go
create mode 100644 vendor/github.com/containers/common/pkg/config/containers.conf
create mode 100644 vendor/github.com/containers/common/pkg/config/default.go
create mode 100644 vendor/github.com/containers/common/pkg/config/default_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/config/default_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/config/default_windows.go
create mode 100644 vendor/github.com/containers/common/pkg/config/nosystemd.go
create mode 100644 vendor/github.com/containers/common/pkg/config/pull_policy.go
create mode 100644 vendor/github.com/containers/common/pkg/config/systemd.go
create mode 100644 vendor/github.com/containers/common/pkg/download/download.go
create mode 100644 vendor/github.com/containers/common/pkg/filters/filters.go
create mode 100644 vendor/github.com/containers/common/pkg/manifests/errors.go
create mode 100644 vendor/github.com/containers/common/pkg/manifests/manifests.go
create mode 100644 vendor/github.com/containers/common/pkg/parse/parse.go
create mode 100644 vendor/github.com/containers/common/pkg/parse/parse_unix.go
create mode 100644 vendor/github.com/containers/common/pkg/retry/retry.go
create mode 100644 vendor/github.com/containers/common/pkg/retry/retry_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/retry/retry_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/conversion.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/default_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/errno_list.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/filter.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/seccomp.json
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/supported.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/types.go
create mode 100644 vendor/github.com/containers/common/pkg/seccomp/validate.go
create mode 100644 vendor/github.com/containers/common/pkg/signal/signal_common.go
create mode 100644 vendor/github.com/containers/common/pkg/signal/signal_linux.go
create mode 100644 vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
create mode 100644 vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/subscriptions/mounts.conf
create mode 100644 vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
create mode 100644 vendor/github.com/containers/common/pkg/supplemented/errors.go
create mode 100644 vendor/github.com/containers/common/pkg/supplemented/supplemented.go
create mode 100644 vendor/github.com/containers/common/pkg/timetype/timestamp.go
create mode 100644 vendor/github.com/containers/common/pkg/umask/umask_unix.go
create mode 100644 vendor/github.com/containers/common/pkg/umask/umask_unsupported.go
create mode 100644 vendor/github.com/containers/common/pkg/util/util.go
create mode 100644 vendor/github.com/containers/common/pkg/util/util_supported.go
create mode 100644 vendor/github.com/containers/common/pkg/util/util_windows.go
create mode 100644 vendor/github.com/containers/common/version/version.go
create mode 100644 vendor/github.com/containers/image/v5/LICENSE
create mode 100644 vendor/github.com/containers/image/v5/copy/copy.go
create mode 100644 vendor/github.com/containers/image/v5/copy/digesting_reader.go
create mode 100644 vendor/github.com/containers/image/v5/copy/encrypt.go
create mode 100644 vendor/github.com/containers/image/v5/copy/manifest.go
create mode 100644 vendor/github.com/containers/image/v5/copy/progress_reader.go
create mode 100644 vendor/github.com/containers/image/v5/copy/sign.go
create mode 100644 vendor/github.com/containers/image/v5/directory/directory_dest.go
create mode 100644 vendor/github.com/containers/image/v5/directory/directory_src.go
create mode 100644 vendor/github.com/containers/image/v5/directory/directory_transport.go
create mode 100644 vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
create mode 100644 vendor/github.com/containers/image/v5/docker/archive/dest.go
vendor/github.com/containers/image/v5/docker/archive/dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/reader.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/src.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/archive/writer.go create mode 100644 vendor/github.com/containers/image/v5/docker/cache.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/client.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_client.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image_dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image_src.go create mode 100644 vendor/github.com/containers/image/v5/docker/docker_transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/errors.go create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go create mode 100644 vendor/github.com/containers/image/v5/docker/lookaside.go create mode 100644 vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/README.md create mode 100644 vendor/github.com/containers/image/v5/docker/reference/helpers.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/normalize.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/reference.go create mode 100644 vendor/github.com/containers/image/v5/docker/reference/regexp.go create mode 100644 vendor/github.com/containers/image/v5/docker/wwwauthenticate.go create mode 100644 vendor/github.com/containers/image/v5/image/docker_list.go create mode 100644 vendor/github.com/containers/image/v5/image/docker_schema1.go create mode 100644 vendor/github.com/containers/image/v5/image/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v5/image/manifest.go create mode 100644 vendor/github.com/containers/image/v5/image/memory.go create mode 100644 vendor/github.com/containers/image/v5/image/oci.go create mode 100644 vendor/github.com/containers/image/v5/image/oci_index.go create mode 100644 vendor/github.com/containers/image/v5/image/sourced.go create mode 100644 vendor/github.com/containers/image/v5/image/unparsed.go create mode 100644 vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go create mode 100644 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go create mode 100644 vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go create mode 100644 
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go create mode 100644 vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go create mode 100644 vendor/github.com/containers/image/v5/internal/rootless/rootless.go create mode 100644 vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go create mode 100644 vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go create mode 100644 vendor/github.com/containers/image/v5/internal/types/types.go create mode 100644 vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go create mode 100644 vendor/github.com/containers/image/v5/manifest/common.go create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema1.go create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go create mode 100644 vendor/github.com/containers/image/v5/manifest/list.go create mode 100644 vendor/github.com/containers/image/v5/manifest/manifest.go create mode 100644 vendor/github.com/containers/image/v5/manifest/oci.go create mode 100644 vendor/github.com/containers/image/v5/manifest/oci_index.go create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_src.go create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go create mode 100644 vendor/github.com/containers/image/v5/oci/internal/oci_util.go create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_src.go create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift-copies.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift_transport.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_dest.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_src.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_transport.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/compression.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/internal/types.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/types/types.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/zstd.go create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config.go create mode 100644 vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go create mode 100644 vendor/github.com/containers/image/v5/pkg/strslice/README.md create mode 100644 
vendor/github.com/containers/image/v5/pkg/strslice/strslice.go create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go create mode 100644 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go create mode 100644 vendor/github.com/containers/image/v5/sif/load.go create mode 100644 vendor/github.com/containers/image/v5/sif/src.go create mode 100644 vendor/github.com/containers/image/v5/sif/transport.go create mode 100644 vendor/github.com/containers/image/v5/signature/docker.go create mode 100644 vendor/github.com/containers/image/v5/signature/json.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_config.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_simple.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_reference_match.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_types.go create mode 100644 vendor/github.com/containers/image/v5/signature/signature.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_image.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_reference.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_transport.go create mode 100644 vendor/github.com/containers/image/v5/tarball/doc.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_reference.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_src.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_transport.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/storage.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/transports.go create mode 100644 vendor/github.com/containers/image/v5/types/types.go create mode 100644 vendor/github.com/containers/image/v5/version/version.go create mode 100644 vendor/github.com/containers/libtrust/CONTRIBUTING.md create mode 100644 vendor/github.com/containers/libtrust/LICENSE create mode 100644 
create mode 100644 vendor/github.com/containers/libtrust/MAINTAINERS
create mode 100644 vendor/github.com/containers/libtrust/README.md
create mode 100644 vendor/github.com/containers/libtrust/certificates.go
create mode 100644 vendor/github.com/containers/libtrust/doc.go
create mode 100644 vendor/github.com/containers/libtrust/ec_key.go
create mode 100644 vendor/github.com/containers/libtrust/ec_key_no_openssl.go
create mode 100644 vendor/github.com/containers/libtrust/ec_key_openssl.go
create mode 100644 vendor/github.com/containers/libtrust/filter.go
create mode 100644 vendor/github.com/containers/libtrust/hash.go
create mode 100644 vendor/github.com/containers/libtrust/jsonsign.go
create mode 100644 vendor/github.com/containers/libtrust/key.go
create mode 100644 vendor/github.com/containers/libtrust/key_files.go
create mode 100644 vendor/github.com/containers/libtrust/key_manager.go
create mode 100644 vendor/github.com/containers/libtrust/rsa_key.go
create mode 100644 vendor/github.com/containers/libtrust/util.go
create mode 100644 vendor/github.com/containers/ocicrypt/.travis.yml
create mode 100644 vendor/github.com/containers/ocicrypt/ADOPTERS.md
create mode 100644 vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
create mode 100644 vendor/github.com/containers/ocicrypt/LICENSE
create mode 100644 vendor/github.com/containers/ocicrypt/MAINTAINERS
create mode 100644 vendor/github.com/containers/ocicrypt/Makefile
create mode 100644 vendor/github.com/containers/ocicrypt/README.md
create mode 100644 vendor/github.com/containers/ocicrypt/SECURITY.md
create mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
create mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
create mode 100644 vendor/github.com/containers/ocicrypt/config/config.go
create mode 100644 vendor/github.com/containers/ocicrypt/config/constructors.go
create mode 100644 vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
create mode 100644 vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
create mode 100644 vendor/github.com/containers/ocicrypt/encryption.go
create mode 100644 vendor/github.com/containers/ocicrypt/go.mod
create mode 100644 vendor/github.com/containers/ocicrypt/go.sum
create mode 100644 vendor/github.com/containers/ocicrypt/gpg.go
create mode 100644 vendor/github.com/containers/ocicrypt/gpgvault.go
create mode 100644 vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go
create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keywrap.go
create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
create mode 100644 vendor/github.com/containers/ocicrypt/reader.go
create mode 100644 vendor/github.com/containers/ocicrypt/spec/spec.go
create mode 100644 vendor/github.com/containers/ocicrypt/utils/delayedreader.go
create mode 100644 vendor/github.com/containers/ocicrypt/utils/ioutils.go
create mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
create mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto
create mode 100644 vendor/github.com/containers/ocicrypt/utils/testing.go
create mode 100644 vendor/github.com/containers/ocicrypt/utils/utils.go
create mode 100644 vendor/github.com/containers/storage/.cirrus.yml
create mode 100644 vendor/github.com/containers/storage/.dockerignore
create mode 100644 vendor/github.com/containers/storage/.gitignore
create mode 100644 vendor/github.com/containers/storage/.golangci.yml
create mode 100644 vendor/github.com/containers/storage/.mailmap
create mode 100644 vendor/github.com/containers/storage/AUTHORS
create mode 100644 vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
create mode 100644 vendor/github.com/containers/storage/CONTRIBUTING.md
create mode 100644 vendor/github.com/containers/storage/LICENSE
create mode 100644 vendor/github.com/containers/storage/Makefile
create mode 100644 vendor/github.com/containers/storage/NOTICE
create mode 100644 vendor/github.com/containers/storage/README.md
create mode 100644 vendor/github.com/containers/storage/SECURITY.md
create mode 100644 vendor/github.com/containers/storage/VERSION
create mode 100644 vendor/github.com/containers/storage/containers.go
create mode 100644 vendor/github.com/containers/storage/drivers/aufs/aufs.go
create mode 100644 vendor/github.com/containers/storage/drivers/aufs/dirs.go
create mode 100644 vendor/github.com/containers/storage/drivers/aufs/mount.go
create mode 100644 vendor/github.com/containers/storage/drivers/aufs/mount_linux.go
create mode 100644 vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/version.go
create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/version_none.go
create mode 100644 vendor/github.com/containers/storage/drivers/chown.go
create mode 100644 vendor/github.com/containers/storage/drivers/chown_unix.go
create mode 100644 vendor/github.com/containers/storage/drivers/chown_windows.go
create mode 100644 vendor/github.com/containers/storage/drivers/chroot_unix.go
create mode 100644 vendor/github.com/containers/storage/drivers/chroot_windows.go
create mode 100644 vendor/github.com/containers/storage/drivers/copy/copy_linux.go
create mode 100644 vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/counter.go
create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/driver.go
create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/mount.go
create mode 100644 vendor/github.com/containers/storage/drivers/driver.go
create mode 100644 vendor/github.com/containers/storage/drivers/driver_freebsd.go
create mode 100644 vendor/github.com/containers/storage/drivers/driver_linux.go
create mode 100644 vendor/github.com/containers/storage/drivers/driver_solaris.go
create mode 100644 vendor/github.com/containers/storage/drivers/driver_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/driver_windows.go
create mode 100644 vendor/github.com/containers/storage/drivers/fsdiff.go
create mode 100644 vendor/github.com/containers/storage/drivers/jsoniter.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/check.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/check_115.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/check_116.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/mount.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlay/randomid.go
create mode 100644 vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
create mode 100644 vendor/github.com/containers/storage/drivers/quota/projectquota.go
create mode 100644 vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_aufs.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_btrfs.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_devicemapper.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_overlay.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_vfs.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_windows.go
create mode 100644 vendor/github.com/containers/storage/drivers/register/register_zfs.go
create mode 100644 vendor/github.com/containers/storage/drivers/template.go
create mode 100644 vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
create mode 100644 vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
create mode 100644 vendor/github.com/containers/storage/drivers/vfs/driver.go
create mode 100644 vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go
create mode 100644 vendor/github.com/containers/storage/drivers/windows/windows.go
create mode 100644 vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs.go
create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
create mode 100644 vendor/github.com/containers/storage/errors.go
create mode 100644 vendor/github.com/containers/storage/go.mod
create mode 100644 vendor/github.com/containers/storage/go.sum
create mode 100644 vendor/github.com/containers/storage/idset.go
create mode 100644 vendor/github.com/containers/storage/images.go
create mode 100644 vendor/github.com/containers/storage/jsoniter.go
create mode 100644 vendor/github.com/containers/storage/layers.go
create mode 100644 vendor/github.com/containers/storage/lockfile_compat.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/README.md
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_110.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_19.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_other.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_zstd.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_other.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/diff.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/time_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/whiteouts.go
create mode 100644 vendor/github.com/containers/storage/pkg/archive/wrap.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compression.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/storage.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/config/config.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/log.go
create mode 100644 vendor/github.com/containers/storage/pkg/directory/directory.go
create mode 100644 vendor/github.com/containers/storage/pkg/directory/directory_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/directory/directory_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go
create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go
create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir.go
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/parser.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/buffer.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/readers.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/writers.go
create mode 100644 vendor/github.com/containers/storage/pkg/locker/README.md
create mode 100644 vendor/github.com/containers/storage/pkg/locker/locker.go
create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/longpath/longpath.go
create mode 100644 vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
create mode 100644 vendor/github.com/containers/storage/pkg/loopback/ioctl.go
create mode 100644 vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
create mode 100644 vendor/github.com/containers/storage/pkg/loopback/loopback.go
create mode 100644 vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mount.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mountinfo.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/parsers/parsers.go
create mode 100644 vendor/github.com/containers/storage/pkg/pools/pools.go
create mode 100644 vendor/github.com/containers/storage/pkg/promise/promise.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/README.md
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/reexec.go
create mode 100644 vendor/github.com/containers/storage/pkg/stringid/README.md
create mode 100644 vendor/github.com/containers/storage/pkg/stringid/stringid.go
create mode 100644 vendor/github.com/containers/storage/pkg/stringutils/README.md
create mode 100644 vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chmod.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/errors.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/exitcode.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/init.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/init_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lchown.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lcow_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lcow_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lstat_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lstat_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/path.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/path_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/path_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/process_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/rm.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_darwin.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_solaris.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/syscall_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/syscall_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/umask.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/umask_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
create mode 100644 vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare.c
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
create mode 100644 vendor/github.com/containers/storage/storage.conf
create mode 100644 vendor/github.com/containers/storage/store.go
create mode 100644 vendor/github.com/containers/storage/types/default_override_test.conf
create mode 100644 vendor/github.com/containers/storage/types/errors.go
create mode 100644 vendor/github.com/containers/storage/types/idmappings.go
create mode 100644 vendor/github.com/containers/storage/types/options.go
create mode 100644 vendor/github.com/containers/storage/types/storage_broken.conf
create mode 100644 vendor/github.com/containers/storage/types/storage_test.conf
create mode 100644 vendor/github.com/containers/storage/types/utils.go
create mode 100644 vendor/github.com/containers/storage/userns.go
create mode 100644 vendor/github.com/containers/storage/utils.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/set.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go
create mode 100644 vendor/github.com/cyphar/filepath-securejoin/go.mod
delete mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf
create mode 100644 vendor/github.com/disiqueira/gotree/v3/.gitignore
create mode 100644 vendor/github.com/disiqueira/gotree/v3/.travis.yml
create mode 100644 vendor/github.com/disiqueira/gotree/v3/LICENSE
create mode 100644 vendor/github.com/disiqueira/gotree/v3/README.md
create mode 100644 vendor/github.com/disiqueira/gotree/v3/_config.yml
create mode 100644 vendor/github.com/disiqueira/gotree/v3/go.mod
create mode 100644 vendor/github.com/disiqueira/gotree/v3/gotree-logo.png
create mode 100644 vendor/github.com/disiqueira/gotree/v3/gotree.go
create mode 100644 vendor/github.com/docker/distribution/.golangci.yml
delete mode 100644 vendor/github.com/docker/distribution/.gometalinter.json
delete mode 100644 vendor/github.com/docker/distribution/.travis.yml
create mode 100644 vendor/github.com/docker/distribution/docker-bake.hcl
create mode 100644 vendor/github.com/docker/docker-credential-helpers/LICENSE
create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/version.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md
create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go
create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go
create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go
create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
create mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go
create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/args_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/init.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/path.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/process_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/rm_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_bsd.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/versions_other.go
create mode 100644 vendor/github.com/docker/libnetwork/LICENSE
create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/README.md
create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
create mode 100644 vendor/github.com/docker/libnetwork/types/types.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap
delete mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml
create mode 100644 vendor/github.com/fsouza/go-dockerclient/.gitattributes
create mode 100644 vendor/github.com/fsouza/go-dockerclient/.gitignore
create mode 100644 vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
create mode 100644 vendor/github.com/fsouza/go-dockerclient/AUTHORS
create mode 100644 vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
create mode 100644 vendor/github.com/fsouza/go-dockerclient/LICENSE
create mode 100644 vendor/github.com/fsouza/go-dockerclient/Makefile
create mode 100644 vendor/github.com/fsouza/go-dockerclient/README.md
create mode 100644 vendor/github.com/fsouza/go-dockerclient/auth.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/change.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/client.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/client_unix.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/client_windows.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_archive.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_attach.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_changes.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_commit.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_copy.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_create.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_export.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_inspect.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_kill.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_list.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_logs.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_pause.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_prune.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_remove.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_rename.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_resize.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_restart.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_start.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_stats.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_stop.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_top.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_unpause.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_update.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/container_wait.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/distribution.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/env.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/event.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/exec.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/go.mod
create mode 100644 vendor/github.com/fsouza/go-dockerclient/go.sum
create mode 100644 vendor/github.com/fsouza/go-dockerclient/image.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/misc.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/network.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/plugin.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/registry_auth.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/signal.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/swarm.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/swarm_configs.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/swarm_node.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/swarm_service.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/swarm_task.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/system.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/tar.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/tls.go
create mode 100644 vendor/github.com/fsouza/go-dockerclient/volume.go
create mode 100644 vendor/github.com/ghodss/yaml/.gitignore
create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml
create mode 100644 vendor/github.com/ghodss/yaml/LICENSE
create mode 100644 vendor/github.com/ghodss/yaml/README.md
create mode 100644 vendor/github.com/ghodss/yaml/fields.go
create mode 100644 vendor/github.com/ghodss/yaml/yaml.go
create mode 100644 vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md
create mode 100644 vendor/github.com/godbus/dbus/v5/LICENSE
create mode 100644 vendor/github.com/godbus/dbus/v5/MAINTAINERS
create mode 100644 vendor/github.com/godbus/dbus/v5/README.md
create mode 100644 vendor/github.com/godbus/dbus/v5/auth.go
create mode 100644 vendor/github.com/godbus/dbus/v5/auth_anonymous.go
create mode 100644 vendor/github.com/godbus/dbus/v5/auth_external.go
create mode 100644 vendor/github.com/godbus/dbus/v5/auth_sha1.go
create mode 100644 vendor/github.com/godbus/dbus/v5/call.go
create mode 100644 vendor/github.com/godbus/dbus/v5/conn.go
create mode 100644 vendor/github.com/godbus/dbus/v5/conn_darwin.go
create mode 100644 vendor/github.com/godbus/dbus/v5/conn_other.go
create mode 100644 vendor/github.com/godbus/dbus/v5/conn_unix.go
create mode 100644 vendor/github.com/godbus/dbus/v5/conn_windows.go
create mode 100644 vendor/github.com/godbus/dbus/v5/dbus.go
create mode 100644 vendor/github.com/godbus/dbus/v5/decoder.go
create mode 100644 vendor/github.com/godbus/dbus/v5/default_handler.go
create mode 100644 vendor/github.com/godbus/dbus/v5/doc.go
create mode 100644 vendor/github.com/godbus/dbus/v5/encoder.go
create mode 100644 vendor/github.com/godbus/dbus/v5/export.go
create mode 100644 vendor/github.com/godbus/dbus/v5/go.mod
create mode 100644 vendor/github.com/godbus/dbus/v5/go.sum
create mode 100644 vendor/github.com/godbus/dbus/v5/homedir.go
create mode 100644 vendor/github.com/godbus/dbus/v5/homedir_dynamic.go
create mode 100644 vendor/github.com/godbus/dbus/v5/homedir_static.go
create mode 100644 vendor/github.com/godbus/dbus/v5/match.go
create mode 100644 vendor/github.com/godbus/dbus/v5/message.go
create mode 100644 vendor/github.com/godbus/dbus/v5/object.go
create mode 100644 vendor/github.com/godbus/dbus/v5/sequence.go
create mode 100644 vendor/github.com/godbus/dbus/v5/sequential_handler.go
create mode 100644 vendor/github.com/godbus/dbus/v5/server_interfaces.go
create mode 100644 vendor/github.com/godbus/dbus/v5/sig.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_darwin.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_generic.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_tcp.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unix.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_netbsd.go
create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go
create mode 100644 vendor/github.com/godbus/dbus/v5/variant.go
create mode 100644 vendor/github.com/godbus/dbus/v5/variant_lexer.go
create mode 100644 vendor/github.com/godbus/dbus/v5/variant_parser.go
create mode 100644 vendor/github.com/golang/groupcache/LICENSE
create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
create mode 100644 vendor/github.com/google/go-intervals/LICENSE
create mode 100644 vendor/github.com/google/go-intervals/intervalset/intervalset.go
create mode 100644 vendor/github.com/google/go-intervals/intervalset/intervalset_immutable.go
create mode 100644 vendor/github.com/google/uuid/null.go
create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod
create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod
create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum
create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
create mode 100644 vendor/github.com/ishidawataru/sctp/.gitignore
create mode 100644 vendor/github.com/ishidawataru/sctp/.travis.yml
create mode 100644 vendor/github.com/ishidawataru/sctp/GO_LICENSE
create mode 100644 vendor/github.com/ishidawataru/sctp/LICENSE
create mode 100644 vendor/github.com/ishidawataru/sctp/NOTICE
create mode 100644 vendor/github.com/ishidawataru/sctp/README.md
create mode 100644 vendor/github.com/ishidawataru/sctp/go.mod
create mode 100644 vendor/github.com/ishidawataru/sctp/ipsock_linux.go
create mode 100644 vendor/github.com/ishidawataru/sctp/sctp.go
create mode 100644 vendor/github.com/ishidawataru/sctp/sctp_linux.go
create mode 100644 vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
create mode 100644 vendor/github.com/jinzhu/copier/License
create mode 100644 vendor/github.com/jinzhu/copier/README.md
create mode 100644 vendor/github.com/jinzhu/copier/copier.go
create mode 100644 vendor/github.com/jinzhu/copier/errors.go
create mode 100644 vendor/github.com/jinzhu/copier/go.mod
delete mode 100644 vendor/github.com/kevinburke/ssh_config/.travis.yml
create mode 100644 vendor/github.com/klauspost/compress/.gitattributes
create mode 100644 vendor/github.com/klauspost/compress/.gitignore
create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml
create mode 100644 vendor/github.com/klauspost/compress/LICENSE
create mode 100644 vendor/github.com/klauspost/compress/README.md
create mode 100644 vendor/github.com/klauspost/compress/compressible.go
create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go
create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go
create mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go
create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go
create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go
create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go
create mode 100644 vendor/github.com/klauspost/compress/flate/level1.go
create mode 100644 vendor/github.com/klauspost/compress/flate/level2.go
create mode 100644 vendor/github.com/klauspost/compress/flate/level3.go
create mode 100644 vendor/github.com/klauspost/compress/flate/level4.go
create mode 100644 vendor/github.com/klauspost/compress/flate/level5.go
create mode 100644 vendor/github.com/klauspost/compress/flate/level6.go
create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go
create mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go
create mode 100644 vendor/github.com/klauspost/compress/flate/token.go
create mode 100644 vendor/github.com/klauspost/compress/fse/README.md
create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go
create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go
create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go
create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go
create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go
create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go
create mode 100644 vendor/github.com/klauspost/compress/gen.sh
create mode 100644 vendor/github.com/klauspost/compress/go.mod
create mode 100644 vendor/github.com/klauspost/compress/go.sum
create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore
create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md
create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go
create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod
create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum
create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md
create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go
create mode 100644 vendor/github.com/klauspost/pgzip/.gitignore
create mode 100644 vendor/github.com/klauspost/pgzip/.travis.yml
create mode 100644 vendor/github.com/klauspost/pgzip/GO_LICENSE
create mode 100644 vendor/github.com/klauspost/pgzip/LICENSE
create mode 100644 vendor/github.com/klauspost/pgzip/README.md
create mode 100644 vendor/github.com/klauspost/pgzip/gunzip.go
create mode 100644 vendor/github.com/klauspost/pgzip/gzip.go
rename vendor/github.com/magiconair/properties/{LICENSE => LICENSE.md} (84%)
create mode 100644 vendor/github.com/manifoldco/promptui/.gitignore
create mode 100644 vendor/github.com/manifoldco/promptui/.golangci.yml
create mode 100644 vendor/github.com/manifoldco/promptui/.travis.yml
create mode 100644 vendor/github.com/manifoldco/promptui/CHANGELOG.md
create mode 100644 vendor/github.com/manifoldco/promptui/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/manifoldco/promptui/LICENSE.md
create mode 100644 vendor/github.com/manifoldco/promptui/Makefile
create mode 100644 vendor/github.com/manifoldco/promptui/README.md
create mode 100644 vendor/github.com/manifoldco/promptui/codes.go
create mode 100644 vendor/github.com/manifoldco/promptui/cursor.go
create mode 100644 vendor/github.com/manifoldco/promptui/go.mod
create mode 100644 vendor/github.com/manifoldco/promptui/go.sum
create mode 100644 vendor/github.com/manifoldco/promptui/keycodes.go
create mode 100644 vendor/github.com/manifoldco/promptui/keycodes_other.go
create mode 100644 vendor/github.com/manifoldco/promptui/keycodes_windows.go
create mode 100644 vendor/github.com/manifoldco/promptui/list/list.go
create mode 100644 vendor/github.com/manifoldco/promptui/prompt.go
create mode 100644 vendor/github.com/manifoldco/promptui/promptui.go
create mode 100644 vendor/github.com/manifoldco/promptui/screenbuf/screenbuf.go
create mode 100644 vendor/github.com/manifoldco/promptui/select.go create mode 100644 vendor/github.com/manifoldco/promptui/styles.go create mode 100644 vendor/github.com/manifoldco/promptui/styles_windows.go rename vendor/github.com/mattn/go-runewidth/{README.mkd => README.md} (82%) create mode 100644 vendor/github.com/mattn/go-runewidth/go.sum create mode 100644 vendor/github.com/mattn/go-runewidth/go.test.sh create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.mod create mode 100644 vendor/github.com/mattn/go-shellwords/go.test.sh create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/miekg/pkcs11/.gitignore create mode 100644 vendor/github.com/miekg/pkcs11/LICENSE create mode 100644 vendor/github.com/miekg/pkcs11/Makefile.release create mode 100644 vendor/github.com/miekg/pkcs11/README.md create mode 100644 vendor/github.com/miekg/pkcs11/error.go create mode 100644 vendor/github.com/miekg/pkcs11/go.mod create mode 100644 vendor/github.com/miekg/pkcs11/hsm.db create mode 100644 vendor/github.com/miekg/pkcs11/params.go create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.go create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11f.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11go.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11t.h create mode 100644 vendor/github.com/miekg/pkcs11/release.go create mode 100644 vendor/github.com/miekg/pkcs11/softhsm.conf create mode 100644 vendor/github.com/miekg/pkcs11/softhsm2.conf create mode 100644 vendor/github.com/miekg/pkcs11/types.go create mode 100644 vendor/github.com/miekg/pkcs11/vendor.go create mode 100644 vendor/github.com/miekg/pkcs11/zconst.go create mode 100644 vendor/github.com/mistifyio/go-zfs/.gitignore create mode 100644 vendor/github.com/mistifyio/go-zfs/.travis.yml create mode 100644 vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md create mode 100644 vendor/github.com/mistifyio/go-zfs/LICENSE create mode 100644 vendor/github.com/mistifyio/go-zfs/README.md create mode 100644 vendor/github.com/mistifyio/go-zfs/Vagrantfile create mode 100644 vendor/github.com/mistifyio/go-zfs/error.go create mode 100644 vendor/github.com/mistifyio/go-zfs/utils.go create mode 100644 vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go create mode 100644 vendor/github.com/mistifyio/go-zfs/utils_solaris.go create mode 100644 vendor/github.com/mistifyio/go-zfs/zfs.go create mode 100644 vendor/github.com/mistifyio/go-zfs/zpool.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml create mode 100644 vendor/github.com/moby/sys/mount/LICENSE create mode 100644 vendor/github.com/moby/sys/mount/doc.go create mode 100644 vendor/github.com/moby/sys/mount/flags_bsd.go create mode 100644 vendor/github.com/moby/sys/mount/flags_linux.go create mode 100644 vendor/github.com/moby/sys/mount/flags_unix.go create mode 100644 vendor/github.com/moby/sys/mount/go.mod create mode 100644 
vendor/github.com/moby/sys/mount/go.sum create mode 100644 vendor/github.com/moby/sys/mount/mount_errors.go create mode 100644 vendor/github.com/moby/sys/mount/mount_unix.go create mode 100644 vendor/github.com/moby/sys/mount/mounter_bsd.go create mode 100644 vendor/github.com/moby/sys/mount/mounter_linux.go create mode 100644 vendor/github.com/moby/sys/mount/mounter_unsupported.go create mode 100644 vendor/github.com/moby/sys/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/moby/sys/mountinfo/LICENSE create mode 100644 vendor/github.com/moby/sys/mountinfo/doc.go create mode 100644 vendor/github.com/moby/sys/mountinfo/go.mod create mode 100644 vendor/github.com/moby/sys/mountinfo/go.sum create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_linux.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_unix.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go create mode 100644 vendor/github.com/modern-go/reflect2/go.mod create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go delete mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go delete mode 100644 vendor/github.com/modern-go/reflect2/test.sh delete mode 100644 vendor/github.com/onsi/gomega/.travis.yml delete mode 100644 vendor/github.com/onsi/gomega/Dockerfile delete mode 100644 vendor/github.com/onsi/gomega/Makefile delete mode 100644 vendor/github.com/onsi/gomega/docker-compose.yaml create mode 100644 vendor/github.com/onsi/gomega/internal/assertion.go delete mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/async_assertion.go delete mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/duration_bundle.go create mode 100644 vendor/github.com/onsi/gomega/internal/gomega.go create mode 100644 vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go create mode 100644 vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go delete mode 100644 vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go delete mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_each_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_field.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_value.go create mode 100644 vendor/github.com/onsi/gomega/tools create mode 100644 vendor/github.com/opencontainers/runc/LICENSE create mode 100644 
vendor/github.com/opencontainers/runc/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/userns.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/userns_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/error/error.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/abs.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/clean.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/doc.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/join.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/separator.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/config.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/generate.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go create mode 100644 
vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/config.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/error.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/validate/validate.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/validate/validate_unsupported.go create mode 100644 vendor/github.com/opencontainers/selinux/LICENSE create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/doc.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go create mode 100644 vendor/github.com/openshift/imagebuilder/.gitignore create mode 100644 vendor/github.com/openshift/imagebuilder/.travis.yml create mode 100644 vendor/github.com/openshift/imagebuilder/LICENSE create mode 100644 vendor/github.com/openshift/imagebuilder/Makefile create mode 100644 vendor/github.com/openshift/imagebuilder/OWNERS create mode 100644 vendor/github.com/openshift/imagebuilder/README.md create mode 100644 vendor/github.com/openshift/imagebuilder/builder.go create mode 100644 vendor/github.com/openshift/imagebuilder/constants.go create mode 100644 vendor/github.com/openshift/imagebuilder/dispatchers.go create mode 100644 vendor/github.com/openshift/imagebuilder/doc.go create mode 100644 vendor/github.com/openshift/imagebuilder/dockerfile/NOTICE create mode 100644 vendor/github.com/openshift/imagebuilder/dockerfile/command/command.go create mode 100644 vendor/github.com/openshift/imagebuilder/dockerfile/parser/line_parsers.go create mode 100644 vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go create mode 100644 vendor/github.com/openshift/imagebuilder/dockerfile/parser/split_command.go create mode 100644 
vendor/github.com/openshift/imagebuilder/evaluator.go create mode 100644 vendor/github.com/openshift/imagebuilder/go.mod create mode 100644 vendor/github.com/openshift/imagebuilder/go.sum create mode 100644 vendor/github.com/openshift/imagebuilder/imagebuilder.spec create mode 100644 vendor/github.com/openshift/imagebuilder/internals.go create mode 100644 vendor/github.com/openshift/imagebuilder/shell_parser.go create mode 100644 vendor/github.com/openshift/imagebuilder/signal/README.md create mode 100644 vendor/github.com/openshift/imagebuilder/signal/signal.go create mode 100644 vendor/github.com/openshift/imagebuilder/signal/signals.go create mode 100644 vendor/github.com/openshift/imagebuilder/strslice/strslice.go create mode 100644 vendor/github.com/ostreedev/ostree-go/LICENSE create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go create mode 100644 vendor/github.com/proglottis/gpgme/.appveyor.yml create mode 100644 vendor/github.com/proglottis/gpgme/.gitignore create mode 100644 vendor/github.com/proglottis/gpgme/.travis.yml create mode 100644 vendor/github.com/proglottis/gpgme/LICENSE create mode 100644 vendor/github.com/proglottis/gpgme/README.md create mode 100644 vendor/github.com/proglottis/gpgme/callbacks.go create mode 100644 vendor/github.com/proglottis/gpgme/data.go create mode 100644 vendor/github.com/proglottis/gpgme/go.mod create mode 100644 vendor/github.com/proglottis/gpgme/go_gpgme.c create mode 100644 vendor/github.com/proglottis/gpgme/go_gpgme.h create mode 100644 vendor/github.com/proglottis/gpgme/gpgme.go create mode 100644 vendor/github.com/proglottis/gpgme/unset_agent_info.go create mode 100644 vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go create mode 100644 vendor/github.com/rivo/uniseg/LICENSE.txt create mode 
100644 vendor/github.com/rivo/uniseg/README.md create mode 100644 vendor/github.com/rivo/uniseg/doc.go create mode 100644 vendor/github.com/rivo/uniseg/go.mod create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go create mode 100644 vendor/github.com/rivo/uniseg/properties.go create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/.gitignore create mode 100644 vendor/github.com/seccomp/libseccomp-golang/.travis.yml create mode 100644 vendor/github.com/seccomp/libseccomp-golang/CHANGELOG create mode 100644 vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md create mode 100644 vendor/github.com/seccomp/libseccomp-golang/LICENSE create mode 100644 vendor/github.com/seccomp/libseccomp-golang/Makefile create mode 100644 vendor/github.com/seccomp/libseccomp-golang/README.md create mode 100644 vendor/github.com/seccomp/libseccomp-golang/go.mod create mode 100644 vendor/github.com/seccomp/libseccomp-golang/go.sum create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/README.md delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/go.mod delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/main.go create mode 100644 vendor/github.com/spf13/afero/.gitignore create mode 100644 vendor/github.com/spf13/afero/iofs.go create mode 100644 vendor/github.com/spf13/afero/symlink.go delete mode 100644 vendor/github.com/spf13/cast/.travis.yml create mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go delete mode 100644 vendor/github.com/spf13/cobra/.travis.yml create mode 100644 vendor/github.com/spf13/cobra/MAINTAINERS create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go rename vendor/github.com/spf13/cobra/{custom_completions.go => completions.go} (59%) create mode 100644 vendor/github.com/spf13/cobra/user_guide.md create mode 100644 vendor/github.com/spf13/viper/TROUBLESHOOTING.md create mode 100644 vendor/github.com/spf13/viper/fs.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/decoder.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/encoder.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/error.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/json/codec.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/toml/codec.go create mode 100644 vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go create mode 100644 vendor/github.com/spf13/viper/logger.go create mode 100644 vendor/github.com/spf13/viper/viper_go1_15.go create mode 100644 vendor/github.com/spf13/viper/viper_go1_16.go create mode 100644 vendor/github.com/spf13/viper/watch.go create mode 100644 vendor/github.com/spf13/viper/watch_wasm.go create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.gitignore create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/LICENSE create mode 100644 
vendor/github.com/stefanberger/go-pkcs11uri/Makefile create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/README.md create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go create mode 100644 vendor/github.com/sylabs/sif/v2/LICENSE.md create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/create.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/load.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/select.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go create mode 100644 vendor/github.com/syndtr/gocapability/LICENSE create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/tchap/go-patricia/AUTHORS create mode 100644 vendor/github.com/tchap/go-patricia/LICENSE create mode 100644 vendor/github.com/tchap/go-patricia/patricia/children.go create mode 100644 vendor/github.com/tchap/go-patricia/patricia/patricia.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/.gitignore delete mode 100644 vendor/github.com/tonistiigi/fsutil/Dockerfile delete mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_linux.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diff.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/docker-bake.hcl delete mode 100644 vendor/github.com/tonistiigi/fsutil/followlinks.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/fs.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/go.mod delete mode 100644 vendor/github.com/tonistiigi/fsutil/go.sum delete mode 100644 vendor/github.com/tonistiigi/fsutil/hardlinks.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/readme.md delete mode 100644 vendor/github.com/tonistiigi/fsutil/receive.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/send.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat_unix.go 
delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat_windows.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/tarwriter.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/generate.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.pb.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.proto delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.pb.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.proto delete mode 100644 vendor/github.com/tonistiigi/fsutil/validator.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/walker.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/go.mod create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 
vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/.gitignore create mode 100644 vendor/github.com/vbauerster/mpb/v7/README.md create mode 100644 vendor/github.com/vbauerster/mpb/v7/UNLICENSE create mode 100644 vendor/github.com/vbauerster/mpb/v7/bar.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/bar_filler.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/bar_option.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/container_option.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/util_bsd.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/util_linux.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/util_solaris.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/util_zos.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/writer_posix.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/any.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/counters.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/decorator.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/elapsed.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/eta.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/merge.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/moving_average.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/name.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/percentage.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/size_type.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/sizeb1000_string.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/sizeb1024_string.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/speed.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/spinner.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/go.mod create mode 100644 
vendor/github.com/vbauerster/mpb/v7/go.sum create mode 100644 vendor/github.com/vbauerster/mpb/v7/internal/percentage.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/internal/width.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/priority_queue.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/progress.go create mode 100644 vendor/github.com/vbauerster/mpb/v7/proxyreader.go rename vendor/github.com/vishvananda/netlink/nl/{parse_attr.go => parse_attr_linux.go} (79%) create mode 100644 vendor/go.etcd.io/bbolt/.gitignore create mode 100644 vendor/go.etcd.io/bbolt/.travis.yml create mode 100644 vendor/go.etcd.io/bbolt/LICENSE create mode 100644 vendor/go.etcd.io/bbolt/Makefile create mode 100644 vendor/go.etcd.io/bbolt/README.md create mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_linux.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_openbsd.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64le.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_solaris.go create mode 100644 vendor/go.etcd.io/bbolt/bolt_windows.go create mode 100644 vendor/go.etcd.io/bbolt/boltsync_unix.go create mode 100644 vendor/go.etcd.io/bbolt/bucket.go create mode 100644 vendor/go.etcd.io/bbolt/compact.go create mode 100644 vendor/go.etcd.io/bbolt/cursor.go create mode 100644 vendor/go.etcd.io/bbolt/db.go create mode 100644 vendor/go.etcd.io/bbolt/doc.go create mode 100644 vendor/go.etcd.io/bbolt/errors.go create mode 100644 vendor/go.etcd.io/bbolt/freelist.go create mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go create mode 100644 vendor/go.etcd.io/bbolt/go.mod create mode 100644 vendor/go.etcd.io/bbolt/go.sum create mode 100644 vendor/go.etcd.io/bbolt/mlock_unix.go create mode 100644 vendor/go.etcd.io/bbolt/mlock_windows.go create mode 100644 vendor/go.etcd.io/bbolt/node.go create mode 100644 vendor/go.etcd.io/bbolt/page.go create mode 100644 vendor/go.etcd.io/bbolt/tx.go create mode 100644 vendor/go.etcd.io/bbolt/unsafe.go create mode 100644 vendor/go.mozilla.org/pkcs7/.gitignore create mode 100644 vendor/go.mozilla.org/pkcs7/.travis.yml create mode 100644 vendor/go.mozilla.org/pkcs7/LICENSE create mode 100644 vendor/go.mozilla.org/pkcs7/Makefile create mode 100644 vendor/go.mozilla.org/pkcs7/README.md create mode 100644 vendor/go.mozilla.org/pkcs7/ber.go create mode 100644 vendor/go.mozilla.org/pkcs7/decrypt.go create mode 100644 vendor/go.mozilla.org/pkcs7/encrypt.go create mode 100644 vendor/go.mozilla.org/pkcs7/go.mod create mode 100644 vendor/go.mozilla.org/pkcs7/pkcs7.go create mode 100644 vendor/go.mozilla.org/pkcs7/sign.go create mode 100644 vendor/go.mozilla.org/pkcs7/verify.go create mode 100644 vendor/go.opencensus.io/.gitignore create mode 100644 vendor/go.opencensus.io/AUTHORS create mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md create mode 100644 
vendor/go.opencensus.io/LICENSE create mode 100644 vendor/go.opencensus.io/Makefile create mode 100644 vendor/go.opencensus.io/README.md create mode 100644 vendor/go.opencensus.io/appveyor.yml create mode 100644 vendor/go.opencensus.io/go.mod create mode 100644 vendor/go.opencensus.io/go.sum create mode 100644 vendor/go.opencensus.io/internal/internal.go create mode 100644 vendor/go.opencensus.io/internal/sanitize.go create mode 100644 vendor/go.opencensus.io/internal/traceinternals.go create mode 100644 vendor/go.opencensus.io/opencensus.go create mode 100644 vendor/go.opencensus.io/trace/basetypes.go create mode 100644 vendor/go.opencensus.io/trace/config.go create mode 100644 vendor/go.opencensus.io/trace/doc.go create mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go create mode 100644 vendor/go.opencensus.io/trace/export.go create mode 100644 vendor/go.opencensus.io/trace/internal/internal.go create mode 100644 vendor/go.opencensus.io/trace/lrumap.go create mode 100644 vendor/go.opencensus.io/trace/sampling.go create mode 100644 vendor/go.opencensus.io/trace/spanbucket.go create mode 100644 vendor/go.opencensus.io/trace/spanstore.go create mode 100644 vendor/go.opencensus.io/trace/status_codes.go create mode 100644 vendor/go.opencensus.io/trace/trace.go create mode 100644 vendor/go.opencensus.io/trace/trace_api.go create mode 100644 vendor/go.opencensus.io/trace/trace_go11.go create mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go create mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go rename vendor/golang.org/x/crypto/{ => internal}/poly1305/bits_compat.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/bits_go1.13.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/mac_noasm.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/poly1305.go (98%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_amd64.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_amd64.s (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_generic.go (99%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_ppc64le.go (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_ppc64le.s (100%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_s390x.go (99%) rename vendor/golang.org/x/crypto/{ => internal}/poly1305/sum_s390x.s (99%) create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 
vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go delete mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/go118.go create mode 100644 vendor/golang.org/x/net/http2/not_go118.go create mode 100644 vendor/golang.org/x/net/idna/go118.go create mode 100644 vendor/golang.org/x/net/idna/pre_go118.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go create mode 100644 vendor/golang.org/x/term/codereview.cfg delete mode 100644 vendor/golang.org/x/term/term_solaris.go delete mode 100644 vendor/golang.org/x/term/term_unix_linux.go rename vendor/golang.org/x/term/{term_unix_aix.go => term_unix_other.go} (63%) delete mode 100644 vendor/golang.org/x/term/term_unix_zos.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh delete mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go rename vendor/google.golang.org/grpc/internal/{credentials/syscallconn_appengine.go => grpcutil/grpcutil.go} (72%) rename vendor/google.golang.org/grpc/internal/{resolver/dns/go113.go => grpcutil/regex.go} (66%) delete mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/env/env.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc create mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitignore create mode 100644 vendor/gopkg.in/square/go-jose.v2/.travis.yml create mode 100644 vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md create mode 100644 
vendor/gopkg.in/square/go-jose.v2/LICENSE create mode 100644 vendor/gopkg.in/square/go-jose.v2/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/asymmetric.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/crypter.go rename vendor/{google.golang.org/grpc/credentials/go12.go => gopkg.in/square/go-jose.v2/doc.go} (55%) create mode 100644 vendor/gopkg.in/square/go-jose.v2/encoding.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/LICENSE create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/decode.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/encode.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/indent.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/scanner.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/stream.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/tags.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwe.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwk.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jws.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/opaque.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/shared.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/signing.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/symmetric.go create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go create mode 100644 vendor/k8s.io/utils/net/parse.go

diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index abb11b054c4..4110d144ea5 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -7,7 +7,6 @@ on:
     branches: "*"
     paths-ignore:
       - 'docs/**'
-      - 'vendor/**'
       - '*.md'
       - '*.yml'
 jobs:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fdec88b86e0..b14aeca1a8f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -180,6 +180,7 @@ Here are some dependents with specific version:
 
 * golang : v1.14
 * golangci-lint: 1.39.0
+* gpgme (brew install gpgme)
 
 When you develop the Sealer project at the local environment, you should use subcommands of Makefile to help yourself to check and build the latest version of Sealer. For the convenience of developers, we use the docker to build Sealer. It can reduce problems of the developing environment.
diff --git a/apply/apply.go b/apply/apply.go
index b38747a1417..9eb9c5060b3 100644
--- a/apply/apply.go
+++ b/apply/apply.go
@@ -16,15 +16,17 @@ package apply
 
 import (
 	"fmt"
+	"os"
 	"path/filepath"
 
+	"github.com/sealerio/sealer/pkg/imageengine"
+	common2 "github.com/sealerio/sealer/pkg/imageengine/common"
+
 	"github.com/sealerio/sealer/apply/driver"
 	"github.com/sealerio/sealer/common"
 	"github.com/sealerio/sealer/pkg/clusterfile"
 	"github.com/sealerio/sealer/pkg/filesystem"
-	"github.com/sealerio/sealer/pkg/image"
-	"github.com/sealerio/sealer/pkg/image/store"
 	v2 "github.com/sealerio/sealer/types/api/v2"
 )
@@ -57,24 +59,22 @@ func NewApplierFromFile(path string) (driver.Interface, error) {
 		}
 		path = filepath.Join(pa, path)
 	}
+
 	Clusterfile, err := clusterfile.NewClusterFile(path)
 	if err != nil {
 		return nil, err
 	}
-	imgSvc, err := image.NewImageService()
-	if err != nil {
-		return nil, err
-	}
-	mounter, err := filesystem.NewClusterImageMounter()
+	imageEngine, err := imageengine.NewImageEngine(common2.EngineGlobalConfigurations{})
 	if err != nil {
 		return nil, err
 	}
-	is, err := store.NewDefaultImageStore()
+	mounter, err := filesystem.NewClusterImageMounter(imageEngine)
 	if err != nil {
 		return nil, err
 	}
+
 	cluster := Clusterfile.GetCluster()
 	if cluster.Name == "" {
 		return nil, fmt.Errorf("cluster name cannot be empty, make sure %s file is correct", path)
@@ -85,9 +85,8 @@ func NewApplierFromFile(path string) (driver.Interface, error) {
 	return &driver.Applier{
 		ClusterDesired:      &cluster,
 		ClusterFile:         Clusterfile,
-		ImageManager:        imgSvc,
+		ImageEngine:         imageEngine,
 		ClusterImageMounter: mounter,
-		ImageStore:          is,
 	}, nil
 }
@@ -102,25 +101,19 @@ func NewDefaultApplier(cluster *v2.Cluster) (driver.Interface, error) {
 	if cluster.Name == "" {
 		return nil, fmt.Errorf("cluster name cannot be empty")
 	}
-	imgSvc, err := image.NewImageService()
-	if err != nil {
-		return nil, err
-	}
-
-	mounter, err := filesystem.NewClusterImageMounter()
+	imageEngine, err := imageengine.NewImageEngine(common2.EngineGlobalConfigurations{})
 	if err != nil {
 		return nil, err
 	}
-	is, err := store.NewDefaultImageStore()
+	mounter, err := filesystem.NewClusterImageMounter(imageEngine)
 	if err != nil {
 		return nil, err
 	}
 
 	return &driver.Applier{
 		ClusterDesired:      cluster,
-		ImageManager:        imgSvc,
+		ImageEngine:         imageEngine,
 		ClusterImageMounter: mounter,
-		ImageStore:          is,
 	}, nil
 }
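The apply/apply.go hunks above show the wiring pattern this patch repeats across the whole tree: one buildah-backed imageengine.Interface replaces the old image.Service and store.ImageStore pair, and the cluster-image mounter now receives the engine as an explicit dependency instead of constructing its own image service. A minimal sketch of that pattern, assuming the constructor signatures introduced elsewhere in this patch; the helper name newEngineAndMounter is illustrative only:

package apply

import (
	"github.com/sealerio/sealer/pkg/filesystem"
	"github.com/sealerio/sealer/pkg/filesystem/clusterimage"
	"github.com/sealerio/sealer/pkg/imageengine"
	imagecommon "github.com/sealerio/sealer/pkg/imageengine/common"
)

// newEngineAndMounter wires the two dependencies that every constructor in
// this patch now builds the same way: a single image engine (configured by
// an EngineGlobalConfigurations value), then a mounter that reuses it.
func newEngineAndMounter() (imageengine.Interface, clusterimage.Interface, error) {
	engine, err := imageengine.NewImageEngine(imagecommon.EngineGlobalConfigurations{})
	if err != nil {
		return nil, nil, err
	}
	mounter, err := filesystem.NewClusterImageMounter(engine)
	if err != nil {
		return nil, nil, err
	}
	return engine, mounter, nil
}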
"k8s.io/apimachinery/pkg/version" @@ -46,10 +47,9 @@ type Applier struct { ClusterDesired *v2.Cluster ClusterCurrent *v2.Cluster ClusterFile clusterfile.Interface - ImageManager image.Service + ImageEngine imageengine.Interface ClusterImageMounter clusterimage.Interface Client *k8s.Client - ImageStore store.ImageStore CurrentClusterInfo *version.Info } @@ -99,15 +99,33 @@ func (c *Applier) mountClusterImage() error { if err != nil { return err } - plats := []*v1.Platform{platform.GetDefaultPlatform()} + + platVisit := map[string]struct{}{} + platVisit[platform.GetDefaultPlatform().ToString()] = struct{}{} for _, v := range platsMap { - plat := v - plats = append(plats, &plat) + platVisit[v.ToString()] = struct{}{} } - err = c.ImageManager.PullIfNotExist(imageName, plats) - if err != nil { - return err + + plats := []string{} + for k := range platVisit { + plats = append(plats, k) + } + // TODO optimize image engine caller. + for _, plat := range plats { + err = c.ImageEngine.Pull(&imagecommon.PullOptions{ + Authfile: auth.GetDefaultAuthFilePath(), + Quiet: false, + TLSVerify: true, + PullPolicy: "missing", + Image: imageName, + Platform: plat, + }) + + if err != nil { + return err + } } + err = c.ClusterImageMounter.MountImage(c.ClusterDesired) if err != nil { return err @@ -143,13 +161,14 @@ func (c *Applier) reconcileCluster() error { logrus.Warnf("failed to umount image(%s): %v", c.ClusterDesired.ClusterName, err) } }() - - baseImage, err := c.ImageStore.GetByName(c.ClusterDesired.Spec.Image, platform.GetDefaultPlatform()) + image := c.ClusterDesired.Spec.Image + // TODO + imageExtension, err := c.ImageEngine.GetSealerImageExtension(&imagecommon.GetImageAnnoOptions{ImageNameOrID: image}) if err != nil { - return fmt.Errorf("failed to get base image(%s): %v", baseImage.Name, err) + return err } - // if no rootfs ,try to install applications. 
- if baseImage.Spec.ImageConfig.ImageType == common.AppImage { + + if imageExtension.ImageType == common.AppImage { return c.installApp() } diff --git a/apply/processor/create.go b/apply/processor/create.go index 9bd4b820484..354f8c51359 100644 --- a/apply/processor/create.go +++ b/apply/processor/create.go @@ -17,6 +17,9 @@ package processor import ( "fmt" + "github.com/sealerio/sealer/pkg/auth" + "github.com/sealerio/sealer/pkg/imageengine" + imagecommon "github.com/sealerio/sealer/pkg/imageengine/common" "github.com/sealerio/sealer/pkg/registry" "github.com/sealerio/sealer/pkg/clusterfile" @@ -24,11 +27,9 @@ import ( "github.com/sealerio/sealer/pkg/filesystem" "github.com/sealerio/sealer/pkg/filesystem/clusterimage" "github.com/sealerio/sealer/pkg/guest" - "github.com/sealerio/sealer/pkg/image" "github.com/sealerio/sealer/pkg/plugin" "github.com/sealerio/sealer/pkg/runtime" "github.com/sealerio/sealer/pkg/runtime/kubernetes" - v1 "github.com/sealerio/sealer/types/api/v1" v2 "github.com/sealerio/sealer/types/api/v2" "github.com/sealerio/sealer/utils/net" "github.com/sealerio/sealer/utils/platform" @@ -37,12 +38,12 @@ import ( type CreateProcessor struct { ClusterFile clusterfile.Interface - ImageManager image.Service cloudImageMounter clusterimage.Interface Runtime runtime.Interface Guest guest.Interface Config config.Interface Plugins plugin.Plugins + ImageEngine imageengine.Interface } func (c *CreateProcessor) GetPipeLine() ([]func(cluster *v2.Cluster) error, error) { @@ -82,14 +83,30 @@ func (c *CreateProcessor) MountImage(cluster *v2.Cluster) error { if err != nil { return err } - plats := []*v1.Platform{platform.GetDefaultPlatform()} + + platVisit := map[string]struct{}{} + platVisit[platform.GetDefaultPlatform().ToString()] = struct{}{} for _, v := range platsMap { - plat := v - plats = append(plats, &plat) + platVisit[v.ToString()] = struct{}{} } - if err = c.ImageManager.PullIfNotExist(cluster.Spec.Image, plats); err != nil { - return err + + plats := []string{} + for k := range platVisit { + plats = append(plats, k) } + for _, plat := range plats { + if err = c.ImageEngine.Pull(&imagecommon.PullOptions{ + Authfile: auth.GetDefaultAuthFilePath(), + Quiet: false, + TLSVerify: true, + PullPolicy: "missing", + Image: cluster.Spec.Image, + Platform: plat, + }); err != nil { + return err + } + } + if err = c.cloudImageMounter.MountImage(cluster); err != nil { return err } @@ -153,12 +170,12 @@ func (c *CreateProcessor) GetPhasePluginFunc(phase plugin.Phase) func(cluster *v } func NewCreateProcessor(clusterFile clusterfile.Interface) (Processor, error) { - imgSvc, err := image.NewImageService() + imageEngine, err := imageengine.NewImageEngine(imagecommon.EngineGlobalConfigurations{}) if err != nil { return nil, err } - mounter, err := filesystem.NewClusterImageMounter() + mounter, err := filesystem.NewClusterImageMounter(imageEngine) if err != nil { return nil, err } @@ -170,7 +187,7 @@ func NewCreateProcessor(clusterFile clusterfile.Interface) (Processor, error) { return &CreateProcessor{ ClusterFile: clusterFile, - ImageManager: imgSvc, + ImageEngine: imageEngine, cloudImageMounter: mounter, Guest: gs, }, nil diff --git a/apply/processor/delete.go b/apply/processor/delete.go index e7095b3b8ca..aa27e11aa29 100644 --- a/apply/processor/delete.go +++ b/apply/processor/delete.go @@ -16,6 +16,8 @@ package processor import ( "fmt" + "github.com/sealerio/sealer/pkg/imageengine" + common2 
"github.com/sealerio/sealer/pkg/imageengine/common" "github.com/sealerio/sealer/pkg/registry" @@ -93,7 +95,12 @@ func (d *DeleteProcessor) CleanFS(cluster *v2.Cluster) error { } func NewDeleteProcessor(clusterFile clusterfile.Interface) (Processor, error) { - mounter, err := filesystem.NewClusterImageMounter() + imageEngine, err := imageengine.NewImageEngine(common2.EngineGlobalConfigurations{}) + if err != nil { + return nil, err + } + + mounter, err := filesystem.NewClusterImageMounter(imageEngine) if err != nil { return nil, err } diff --git a/apply/processor/gen.go b/apply/processor/gen.go index 4499a874d0c..491266df195 100644 --- a/apply/processor/gen.go +++ b/apply/processor/gen.go @@ -19,6 +19,10 @@ package processor import ( "fmt" "net" + + "github.com/sealerio/sealer/pkg/imageengine" + common2 "github.com/sealerio/sealer/pkg/imageengine/common" + "strconv" "github.com/sealerio/sealer/pkg/registry" @@ -55,20 +59,23 @@ type ParserArg struct { type GenerateProcessor struct { Runtime *kubernetes.Runtime ImageManager image.Service + ImageEngine imageengine.Interface ImageMounter clusterimage.Interface } func NewGenerateProcessor() (Processor, error) { - imageMounter, err := filesystem.NewClusterImageMounter() + imageEngine, err := imageengine.NewImageEngine(common2.EngineGlobalConfigurations{}) if err != nil { return nil, err } - imgSvc, err := image.NewImageService() + + imageMounter, err := filesystem.NewClusterImageMounter(imageEngine) if err != nil { return nil, err } + return &GenerateProcessor{ - ImageManager: imgSvc, + ImageEngine: imageEngine, ImageMounter: imageMounter, }, nil } diff --git a/build/buildinstruction/utils.go b/build/buildinstruction/utils.go index a86e8ed56c0..f624b55f8ff 100644 --- a/build/buildinstruction/utils.go +++ b/build/buildinstruction/utils.go @@ -15,23 +15,16 @@ package buildinstruction import ( - "context" - "fmt" - "os" "path/filepath" "strings" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - fsutil "github.com/tonistiigi/fsutil/copy" - "github.com/sealerio/sealer/common" "github.com/sealerio/sealer/pkg/image" "github.com/sealerio/sealer/pkg/image/cache" v1 "github.com/sealerio/sealer/types/api/v1" - "github.com/sealerio/sealer/utils/archive" "github.com/sealerio/sealer/utils/collector" - "github.com/sealerio/sealer/utils/os/fs" + "github.com/sirupsen/logrus" ) func tryCache(parentID cache.ChainID, @@ -57,54 +50,47 @@ func tryCache(parentID cache.ChainID, } func GenerateSourceFilesDigest(root, src string) (digest.Digest, error) { - m, err := fsutil.ResolveWildcards(root, src, true) - if err != nil { - return "", err - } - - // wrong wildcards: no such file or directory - if len(m) == 0 { - return "", fmt.Errorf("%s not found", src) - } - - if len(m) == 1 { - return generateDigest(filepath.Join(root, src)) - } - - tmp, err := fs.NewFilesystem().MkTmpdir() - if err != nil { - return "", fmt.Errorf("failed to create tmp dir %s:%v", tmp, err) - } - - defer func() { - if err = os.RemoveAll(tmp); err != nil { - logrus.Warn(err) - } - }() - - xattrErrorHandler := func(dst, src, key string, err error) error { - logrus.Warn(err) - return nil - } - opt := []fsutil.Opt{ - fsutil.WithXAttrErrorHandler(xattrErrorHandler), - } - - for _, s := range m { - if err := fsutil.Copy(context.TODO(), root, s, tmp, filepath.Base(s), opt...); err != nil { - return "", err - } - } - - return generateDigest(tmp) -} - -func generateDigest(path string) (digest.Digest, error) { - layerDgst, _, err := 
archive.TarCanonicalDigest(path) - if err != nil { - return "", err - } - return layerDgst, nil + return "", nil + //m, err := fsutil.ResolveWildcards(root, src, true) + //if err != nil { + // return "", err + //} + // + //// wrong wildcards: no such file or directory + //if len(m) == 0 { + // return "", fmt.Errorf("%s not found", src) + //} + // + //if len(m) == 1 { + // return generateDigest(filepath.Join(root, src)) + //} + // + //tmp, err := fs.NewFilesystem().MkTmpdir() + //if err != nil { + // return "", fmt.Errorf("failed to create tmp dir %s:%v", tmp, err) + //} + // + //defer func() { + // if err = os.RemoveAll(tmp); err != nil { + // logrus.Warn(err) + // } + //}() + // + //xattrErrorHandler := func(dst, src, key string, err error) error { + // logrus.Warn(err) + // return nil + //} + //opt := []fsutil.Opt{ + // fsutil.WithXAttrErrorHandler(xattrErrorHandler), + //} + // + //for _, s := range m { + // if err := fsutil.Copy(context.TODO(), root, s, tmp, filepath.Base(s), opt...); err != nil { + // return "", err + // } + //} + // + //return generateDigest(tmp) } // GetBaseLayersPath used in build stage, where the image still has from layer diff --git a/cmd/sealer/cmd/build.go b/cmd/sealer/cmd/build.go index 2c2046464e5..85fb917d2df 100644 --- a/cmd/sealer/cmd/build.go +++ b/cmd/sealer/cmd/build.go @@ -15,14 +15,22 @@ package cmd import ( + "fmt" "os" + "path/filepath" + "github.com/containers/buildah/pkg/cli" + "github.com/containers/buildah/pkg/parse" + "github.com/pkg/errors" + "github.com/sealerio/sealer/build/buildimage" + pkgauth "github.com/sealerio/sealer/pkg/auth" + "github.com/sealerio/sealer/pkg/imageengine" + bc "github.com/sealerio/sealer/pkg/imageengine/common" + v1 "github.com/sealerio/sealer/types/api/v1" + "github.com/sealerio/sealer/version" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - - "github.com/sealerio/sealer/build" - "github.com/sealerio/sealer/utils/platform" - "github.com/sealerio/sealer/utils/strings" + "k8s.io/apimachinery/pkg/util/json" ) type BuildFlag struct { @@ -35,16 +43,16 @@ type BuildFlag struct { Base bool } -var buildConfig *BuildFlag +var buildFlags = bc.BuildOptions{} // buildCmd represents the build command var buildCmd = &cobra.Command{ Use: "build [flags] PATH", Short: "build a ClusterImage from a Kubefile", - Long: `build command is used to build a ClusterImage from specified Kubefile. + Long: `build command is used to generate a ClusterImage from specified Kubefile. It organizes the specified Kubefile and input building context, and builds a brand new ClusterImage.`, - Args: cobra.ExactArgs(1), + Args: cobra.MaximumNArgs(1), Example: `the current path is the context path, default build type is lite and use build cache build: @@ -60,48 +68,111 @@ build with args: sealer build -f Kubefile -t my-kubernetes:1.19.8 --build-arg MY_ARG=abc,PASSWORD=Sealer123 . 
`, RunE: func(cmd *cobra.Command, args []string) error { - buildContext := args[0] + return buildSealerImage() + }, +} - targetPlatforms, err := platform.GetPlatform(buildConfig.Platform) - if err != nil { - return err +func buildSealerImage() error { + // TODO clean the logic here + _os, arch, variant, err := parse.Platform(buildFlags.Platform) + if err != nil { + return err + } + + engine, err := imageengine.NewImageEngine(bc.EngineGlobalConfigurations{}) + if err != nil { + return errors.Wrap(err, "failed to initiate a builder") + } + + extension := v1.ImageExtension{} + extensionBytes, err := json.Marshal(extension) + if err != nil { + return err + } + + buildFlags.Annotations = append(buildFlags.Annotations, fmt.Sprintf("%s=%s", v1.SealerImageExtension, string(extensionBytes))) + iid, err := engine.Build(&buildFlags) + if err != nil { + return errors.Errorf("error in building image, %v", err) + } + + defer func() { + if err := engine.RemoveImage(&bc.RemoveImageOptions{ + ImageNamesOrIDs: []string{iid}, + Force: true, + }); err != nil { + logrus.Warnf("failed to remove image %s, you need to remove it manually: %v", iid, err) } - for _, tp := range targetPlatforms { - p := tp - conf := &build.Config{ - BuildType: buildConfig.BuildType, - NoCache: buildConfig.NoCache, - ImageName: buildConfig.ImageName, - NoBase: !buildConfig.Base, - BuildArgs: strings.ConvertToMap(buildConfig.BuildArgs), - Platform: *p, - } - builder, err := build.NewBuilder(conf) - if err != nil { - return err - } - err = builder.Build(buildConfig.ImageName, buildContext, buildConfig.KubefileName) - if err != nil { - return err - } + }() + + tmpDir, err := os.MkdirTemp("", "sealer") + if err != nil { + return err + } + + tmpDirForLink := filepath.Join(tmpDir, "tmp-rootfs") + cid, err := engine.BuildRootfs(&bc.BuildRootfsOptions{ + ImageNameOrID: iid, + DestDir: tmpDirForLink, + }) + if err != nil { + return err + } + + defer func() { + err = os.RemoveAll(tmpDir) + if err != nil { + logrus.Warnf("failed to rm link dir to rootfs: %v : %v", tmpDir, err) } - return nil - }, + }() + + differ := buildimage.NewRegistryDiffer(v1.Platform{ + Architecture: arch, + OS: _os, + Variant: variant, + }) + + // TODO optimize the differ. + err = differ.Process(tmpDirForLink, tmpDirForLink) + if err != nil { + return err + } + + err = engine.Commit(&bc.CommitOptions{ + Format: cli.DefaultFormat(), + Rm: true, + ContainerID: cid, + Image: buildFlags.Tags[0], + }) + if err != nil { + return err + } + + return nil } func init() { - buildConfig = &BuildFlag{} - rootCmd.AddCommand(buildCmd) - buildCmd.Flags().StringVarP(&buildConfig.BuildType, "mode", "m", "lite", "ClusterImage build type, default is lite") - buildCmd.Flags().StringVarP(&buildConfig.KubefileName, "kubefile", "f", "Kubefile", "Kubefile filepath") - buildCmd.Flags().StringVarP(&buildConfig.ImageName, "imageName", "t", "", "the name of ClusterImage") - buildCmd.Flags().BoolVar(&buildConfig.NoCache, "no-cache", false, "build without cache") - buildCmd.Flags().BoolVar(&buildConfig.Base, "base", true, "build with base image, default value is true.") - buildCmd.Flags().StringSliceVar(&buildConfig.BuildArgs, "build-arg", []string{}, "set custom build args") - buildCmd.Flags().StringVar(&buildConfig.Platform, "platform", "", "set ClusterImage platform. 
If not set, keep same platform with runtime") - - if err := buildCmd.MarkFlagRequired("imageName"); err != nil { - logrus.Errorf("failed to init flag: %v", err) - os.Exit(1) + buildCmd.Flags().StringVarP(&buildFlags.BuildType, "mode", "m", "lite", "ClusterImage build type, default is lite") + buildCmd.Flags().StringVarP(&buildFlags.Kubefile, "file", "f", "Kubefile", "Kubefile filepath") + buildCmd.Flags().StringVar(&buildFlags.Platform, "platform", parse.DefaultPlatform(), "set the target platform, like linux/amd64 or linux/amd64/v7") + buildCmd.Flags().StringVar(&buildFlags.PullPolicy, "pull", "", "pull policy. Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always") + buildCmd.Flags().StringVar(&buildFlags.Authfile, "authfile", pkgauth.GetDefaultAuthFilePath(), "path of the authentication file.") + buildCmd.Flags().BoolVar(&buildFlags.NoCache, "no-cache", false, "do not use existing cached images for building. Build from the start with a new set of cached layers.") + buildCmd.Flags().BoolVar(&buildFlags.Base, "base", true, "build with base image, default value is true.") + buildCmd.Flags().StringSliceVarP(&buildFlags.Tags, "tag", "t", []string{}, "specify a name for ClusterImage") + buildCmd.Flags().StringSliceVar(&buildFlags.BuildArgs, "build-arg", []string{}, "set custom build args") + buildCmd.Flags().StringSliceVar(&buildFlags.Annotations, "annotation", []string{}, "add annotations for image. Format like --annotation key=[value]") + buildCmd.Flags().StringSliceVar(&buildFlags.Labels, "label", []string{getSealerLabel()}, "add labels for image. Format like --label key=[value]") + + requiredFlags := []string{"tag"} + for _, flag := range requiredFlags { + if err := buildCmd.MarkFlagRequired(flag); err != nil { + logrus.Fatal(err) + } } + rootCmd.AddCommand(buildCmd) +} + +func getSealerLabel() string { + return "io.sealer.version=" + version.Get().GitVersion } diff --git a/cmd/sealer/cmd/images.go b/cmd/sealer/cmd/images.go index 9d11e088f93..0a7ef171ea3 100644 --- a/cmd/sealer/cmd/images.go +++ b/cmd/sealer/cmd/images.go @@ -15,27 +15,12 @@ package cmd import ( - "fmt" - "sort" - "strings" - - "github.com/olekukonko/tablewriter" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/imageengine/common" "github.com/spf13/cobra" - - "github.com/sealerio/sealer/common" - "github.com/sealerio/sealer/pkg/image" - "github.com/sealerio/sealer/pkg/image/reference" ) -const ( - imageID = "IMAGE ID" - imageName = "IMAGE NAME" - imageCreate = "CREATE" - imageSize = "SIZE" - imageArch = "ARCH" - imageVariant = "VARIANT" - timeDefaultFormat = "2006-01-02 15:04:05" -) +var imagesOpts *common.ImagesOptions var listCmd = &cobra.Command{ Use: "images", @@ -45,84 +30,24 @@ var listCmd = &cobra.Command{ Args: cobra.NoArgs, Example: `sealer images`, RunE: func(cmd *cobra.Command, args []string) error { - - ims, err := image.NewImageMetadataService() + engine, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{}) if err != nil { return err } - - imageMetadataMap, err := ims.List() - if err != nil { - return err - } - - var summaries = make(ManifestList, 0, len(imageMetadataMap)) - - for name, manifestList := range imageMetadataMap { - for _, m := range manifestList.Manifests { - displayName := name - create := m.CREATED.Format(timeDefaultFormat) - size := formatSize(m.SIZE) - named, err := reference.ParseToNamed(name) - if err != nil { - return err - } - - if reference.IsDefaultDomain(named.Domain()) { - displayName = 
named.RepoTag() - splits := strings.Split(displayName, "/") - if reference.IsDefaultRepo(splits[0]) { - displayName = splits[1] - } - } - - summaries = append(summaries, ManifestDescriptor{ - imageName: displayName, - imageID: m.ID, - imageArch: m.Platform.Architecture, - imageVariant: m.Platform.Variant, - imageCreate: create, - imageSize: size}) - } - } - - sort.Sort(sort.Reverse(summaries)) - - table := tablewriter.NewWriter(common.StdOut) - table.SetHeader([]string{imageName, imageID, imageArch, imageVariant, imageCreate, imageSize}) - - for _, md := range summaries { - table.Append([]string{md.imageName, md.imageID, md.imageArch, md.imageVariant, md.imageCreate, md.imageSize}) - } - - table.Render() - return nil + return engine.Images(imagesOpts) }, } -type ManifestDescriptor struct { - imageName, imageID, imageArch, imageVariant, imageCreate, imageSize string -} - -type ManifestList []ManifestDescriptor - -func (r ManifestList) Len() int { return len(r) } -func (r ManifestList) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r ManifestList) Less(i, j int) bool { return r[i].imageCreate < r[j].imageCreate } - func init() { - rootCmd.AddCommand(listCmd) -} + imagesOpts = &common.ImagesOptions{} + flags := listCmd.Flags() + flags.BoolVarP(&imagesOpts.All, "all", "a", false, "show all images, including intermediate images from a build") + flags.BoolVar(&imagesOpts.Digests, "digests", false, "show digests") + flags.BoolVar(&imagesOpts.JSON, "json", false, "output in JSON format") + flags.BoolVarP(&imagesOpts.NoHeading, "noheading", "n", false, "do not print column headings") + flags.BoolVar(&imagesOpts.NoTrunc, "no-trunc", false, "do not truncate output") + flags.BoolVarP(&imagesOpts.Quiet, "quiet", "q", false, "display only image IDs") + flags.BoolVarP(&imagesOpts.History, "history", "", false, "display the image name history") -func formatSize(size int64) (Size string) { - if size < 1024 { - Size = fmt.Sprintf("%.2fB", float64(size)/float64(1)) - } else if size < (1024 * 1024) { - Size = fmt.Sprintf("%.2fKiB", float64(size)/float64(1024)) - } else if size < (1024 * 1024 * 1024) { - Size = fmt.Sprintf("%.2fMiB", float64(size)/float64(1024*1024)) - } else { - Size = fmt.Sprintf("%.2fGiB", float64(size)/float64(1024*1024*1024)) - } - return + rootCmd.AddCommand(listCmd) } diff --git a/cmd/sealer/cmd/inspect.go b/cmd/sealer/cmd/inspect.go index 8eedc7b98bd..873fa40215e 100644 --- a/cmd/sealer/cmd/inspect.go +++ b/cmd/sealer/cmd/inspect.go @@ -15,42 +15,41 @@ package cmd import ( - "fmt" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/imageengine/common" "github.com/spf13/cobra" - - "github.com/sealerio/sealer/pkg/image" - v1 "github.com/sealerio/sealer/types/api/v1" - "github.com/sealerio/sealer/utils/platform" ) -var inspectPlatformFlag string +var inspectOpts *common.InspectOptions // inspectCmd represents the inspect command var inspectCmd = &cobra.Command{ Use: "inspect", Short: "print the image information or Clusterfile", - Long: `sealer inspect ${image id} to print image information`, - Args: cobra.ExactArgs(1), + Example: `sealer inspect {imageName or imageID} +sealer inspect --format '{{.OCIv1.Config.Env}}' {imageName or imageID} +`, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - var targetPlatforms []*v1.Platform - if inspectPlatformFlag != "" { - tp, err := platform.ParsePlatforms(inspectPlatformFlag) - if err != nil { - return err - } - targetPlatforms = tp + engine, err := 
imageengine.NewImageEngine(common.EngineGlobalConfigurations{}) + if err != nil { + return err } - file, err := image.GetImageDetails(args[0], targetPlatforms) + + inspectOpts.ImageNameOrID = args[0] + err = engine.Inspect(inspectOpts) if err != nil { - return fmt.Errorf("failed to find information by image %s: %v", args[0], err) + return err } - fmt.Println(file) return nil }, } func init() { + inspectOpts = &common.InspectOptions{} + flags := inspectCmd.Flags() + flags.StringVarP(&inspectOpts.Format, "format", "f", "", "use `format` as a Go template to format the output") + flags.StringVarP(&inspectOpts.InspectType, "type", "t", "image", "look at the item of the specified `type` (container or image) and name") rootCmd.AddCommand(inspectCmd) - inspectCmd.Flags().StringVar(&inspectPlatformFlag, "platform", "", "set ClusterImage platform") } diff --git a/cmd/sealer/cmd/load.go b/cmd/sealer/cmd/load.go index 7922ae0e7ee..a4e998405ac 100644 --- a/cmd/sealer/cmd/load.go +++ b/cmd/sealer/cmd/load.go @@ -15,16 +15,15 @@ package cmd import ( - "fmt" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/imageengine/common" "os" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - - "github.com/sealerio/sealer/pkg/image" ) -var imageSrc string +var loadOpts *common.LoadOptions // loadCmd represents the load command var loadCmd = &cobra.Command{ @@ -34,22 +33,22 @@ var loadCmd = &cobra.Command{ Example: `sealer load -i kubernetes.tar`, Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - ifs, err := image.NewImageFileService() + engine, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{}) if err != nil { return err } - if err = ifs.Load(imageSrc); err != nil { - return fmt.Errorf("failed to load image from %s: %v", imageSrc, err) - } - return nil + return engine.Load(loadOpts) }, } func init() { - rootCmd.AddCommand(loadCmd) - loadCmd.Flags().StringVarP(&imageSrc, "input", "i", "", "read image from tar archive file") + loadOpts = &common.LoadOptions{} + flags := loadCmd.Flags() + flags.StringVarP(&loadOpts.Input, "input", "i", "", "Load image from file") + flags.BoolVarP(&loadOpts.Quiet, "quiet", "q", false, "Suppress the output") if err := loadCmd.MarkFlagRequired("input"); err != nil { logrus.Errorf("failed to init flag: %v", err) os.Exit(1) } + rootCmd.AddCommand(loadCmd) } diff --git a/cmd/sealer/cmd/login.go b/cmd/sealer/cmd/login.go index 268032034d2..32ade838aa3 100644 --- a/cmd/sealer/cmd/login.go +++ b/cmd/sealer/cmd/login.go @@ -17,7 +17,9 @@ package cmd import ( "os" - "github.com/sealerio/sealer/pkg/image" + "github.com/sealerio/sealer/pkg/auth" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/imageengine/common" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -29,7 +31,7 @@ type LoginFlag struct { RegistryPasswd string } -var loginConfig *LoginFlag +var loginConfig *common.LoginOptions var loginCmd = &cobra.Command{ Use: "login", @@ -39,20 +41,24 @@ var loginCmd = &cobra.Command{ Example: `sealer login registry.cn-qingdao.aliyuncs.com -u [username] -p [password]`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - imgSvc, err := image.NewImageService() + adaptor, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{}) if err != nil { return err } + loginConfig.Domain = args[0] - return imgSvc.Login(args[0], loginConfig.RegistryUsername, loginConfig.RegistryPasswd) + return adaptor.Login(loginConfig) 
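	// login, pull, push, and build all default --authfile to
	// auth.GetDefaultAuthFilePath(), which resolves to $HOME/.sealer/config.json
	// per the flag help below. A hypothetical sketch of that convention (the
	// real implementation lives in the pkg/auth package added by this patch):
	//
	//	func GetDefaultAuthFilePath() string {
	//		home, err := os.UserHomeDir()
	//		if err != nil {
	//			return filepath.Join("/root", ".sealer", "config.json") // assumption: root-only fallback
	//		}
	//		return filepath.Join(home, ".sealer", "config.json")
	//	}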
}, } func init() { - loginConfig = &LoginFlag{} + loginConfig = &common.LoginOptions{} rootCmd.AddCommand(loginCmd) - loginCmd.Flags().StringVarP(&loginConfig.RegistryUsername, "username", "u", "", "user name for login registry") - loginCmd.Flags().StringVarP(&loginConfig.RegistryPasswd, "passwd", "p", "", "password for login registry") + loginCmd.Flags().StringVarP(&loginConfig.Username, "username", "u", "", "user name for login registry") + loginCmd.Flags().StringVarP(&loginConfig.Password, "passwd", "p", "", "password for login registry") + loginCmd.Flags().StringVar(&loginConfig.AuthFile, "authfile", auth.GetDefaultAuthFilePath(), "path to store auth file after login. It will be $HOME/.sealer/config.json by default.") + loginCmd.Flags().BoolVar(&loginConfig.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.") + if err := loginCmd.MarkFlagRequired("username"); err != nil { logrus.Errorf("failed to init flag: %v", err) os.Exit(1) diff --git a/cmd/sealer/cmd/pull.go b/cmd/sealer/cmd/pull.go index 083f90f40d4..693ebb6334e 100644 --- a/cmd/sealer/cmd/pull.go +++ b/cmd/sealer/cmd/pull.go @@ -15,14 +15,14 @@ package cmd import ( - "github.com/sirupsen/logrus" + "github.com/containers/buildah/pkg/parse" + pkgauth "github.com/sealerio/sealer/pkg/auth" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/imageengine/common" "github.com/spf13/cobra" - - "github.com/sealerio/sealer/pkg/image" - "github.com/sealerio/sealer/utils/platform" ) -var platformFlag string +var pullOpts *common.PullOptions // pullCmd represents the pull command var pullCmd = &cobra.Command{ @@ -33,24 +33,23 @@ var pullCmd = &cobra.Command{ Example: `sealer pull registry.cn-qingdao.aliyuncs.com/sealer-io/kubernetes:v1.19.8`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - imgSvc, err := image.NewImageService() - if err != nil { - return err - } - - plat, err := platform.GetPlatform(platformFlag) + engine, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{}) if err != nil { return err } - if err := imgSvc.Pull(args[0], plat); err != nil { - return err - } - logrus.Infof("succeed in pulling ClusterImage(%s)", args[0]) - return nil + pullOpts.Image = args[0] + return engine.Pull(pullOpts) }, } func init() { + pullOpts = &common.PullOptions{} + + pullCmd.Flags().StringVar(&pullOpts.Platform, "platform", parse.DefaultPlatform(), "prefer OS/ARCH instead of the current operating system and architecture for choosing images") + pullCmd.Flags().StringVar(&pullOpts.Authfile, "authfile", pkgauth.GetDefaultAuthFilePath(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + pullCmd.Flags().BoolVar(&pullOpts.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. 
TLS verification cannot be used when talking to an insecure registry.") + pullCmd.Flags().StringVar(&pullOpts.PullPolicy, "policy", "missing", "missing, always, or never.") + pullCmd.Flags().BoolVarP(&pullOpts.Quiet, "quiet", "q", false, "don't output progress information when pulling images") + rootCmd.AddCommand(pullCmd) - pullCmd.Flags().StringVar(&platformFlag, "platform", "", "set ClusterImage platform") } diff --git a/cmd/sealer/cmd/push.go b/cmd/sealer/cmd/push.go index 41f725bf0bf..a7be759cf01 100644 --- a/cmd/sealer/cmd/push.go +++ b/cmd/sealer/cmd/push.go @@ -15,12 +15,16 @@ package cmd import ( + "github.com/sealerio/sealer/pkg/auth" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/imageengine/common" "github.com/spf13/cobra" - "github.com/sealerio/sealer/pkg/image" "github.com/sealerio/sealer/pkg/image/utils" ) +var pushOpts *common.PushOptions + // pushCmd represents the push command var pushCmd = &cobra.Command{ Use: "push", @@ -30,17 +34,22 @@ var pushCmd = &cobra.Command{ Example: `sealer push registry.cn-qingdao.aliyuncs.com/sealer-io/my-kubernetes-cluster-with-dashboard:latest`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - imgsvc, err := image.NewImageService() + adaptor, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{}) if err != nil { return err } - - return imgsvc.Push(args[0]) - + pushOpts.Image = args[0] + return adaptor.Push(pushOpts) }, ValidArgsFunction: utils.ImageListFuncForCompletion, } func init() { + pushOpts = &common.PushOptions{} + + pushCmd.Flags().StringVar(&pushOpts.Authfile, "authfile", auth.GetDefaultAuthFilePath(), "path to store auth file after login. Accessing registry with this auth.") + // tls-verify is not working currently + pushCmd.Flags().BoolVar(&pushOpts.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry. 
(not working currently)")
+	pushCmd.Flags().BoolVarP(&pushOpts.Quiet, "quiet", "q", false, "don't output progress information when pushing images")
 	rootCmd.AddCommand(pushCmd)
 }
diff --git a/cmd/sealer/cmd/rmi.go b/cmd/sealer/cmd/rmi.go
index 688e948705b..27cfdbd8a93 100644
--- a/cmd/sealer/cmd/rmi.go
+++ b/cmd/sealer/cmd/rmi.go
@@ -15,22 +15,15 @@
 package cmd

 import (
-	"errors"
-	"strings"
+	"github.com/sealerio/sealer/pkg/imageengine"
+	"github.com/sealerio/sealer/pkg/imageengine/common"

 	"github.com/spf13/cobra"

-	"github.com/sealerio/sealer/pkg/image"
 	"github.com/sealerio/sealer/pkg/image/utils"
-	v1 "github.com/sealerio/sealer/types/api/v1"
-	"github.com/sealerio/sealer/utils/platform"
 )

-type removeImageFlag struct {
-	platform string
-}
-
-var opts removeImageFlag
+var removeOpts *common.RemoveImageOptions

 // rmiCmd represents the rmi command
 var rmiCmd = &cobra.Command{
@@ -47,36 +40,20 @@
 }

 func runRemove(images []string) error {
-	imageService, err := image.NewImageService()
+	removeOpts.ImageNamesOrIDs = images
+	engine, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{})
 	if err != nil {
 		return err
 	}
-	var errs []string
-	var targetPlatforms []*v1.Platform
-
-	if opts.platform != "" {
-		tp, err := platform.ParsePlatforms(opts.platform)
-		if err != nil {
-			return err
-		}
-		targetPlatforms = tp
-	}
-
-	for _, img := range images {
-		if err := imageService.Delete(img, targetPlatforms); err != nil {
-			errs = append(errs, err.Error())
-		}
-	}
-	if len(errs) > 0 {
-		msg := strings.Join(errs, "\n")
-		return errors.New(msg)
-	}
-	return nil
+	return engine.RemoveImage(removeOpts)
 }

 func init() {
-	opts = removeImageFlag{}
+	removeOpts = &common.RemoveImageOptions{}
+	flags := rmiCmd.Flags()
+	flags.BoolVarP(&removeOpts.All, "all", "a", false, "remove all images")
+	flags.BoolVarP(&removeOpts.Prune, "prune", "p", false, "prune dangling images")
+	flags.BoolVarP(&removeOpts.Force, "force", "f", false, "force removal of the image and any containers using the image")
 	rootCmd.AddCommand(rmiCmd)
-	rmiCmd.Flags().StringVar(&opts.platform, "platform", "", "set ClusterImage platform")
 }
diff --git a/cmd/sealer/cmd/save.go b/cmd/sealer/cmd/save.go
index 844e0173e79..0f7baa5f458 100644
--- a/cmd/sealer/cmd/save.go
+++ b/cmd/sealer/cmd/save.go
@@ -15,25 +15,15 @@
 package cmd

 import (
-	"fmt"
-	"os"
-	"path/filepath"
-
+	"github.com/sealerio/sealer/pkg/imageengine"
+	"github.com/sealerio/sealer/pkg/imageengine/buildah"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-
-	"github.com/sealerio/sealer/pkg/image"
-	"github.com/sealerio/sealer/pkg/image/reference"
-	v1 "github.com/sealerio/sealer/types/api/v1"
-	"github.com/sealerio/sealer/utils/platform"
+	"os"
 )

-type saveFlag struct {
-	ImageTar string
-	Platform string
-}
-
-var save saveFlag
+var saveOpts *common.SaveOptions

 // saveCmd represents the save command
 var saveCmd = &cobra.Command{
@@ -46,58 +36,27 @@
 save kubernetes:v1.19.8 image to kubernetes.tar file:
 sealer save -o kubernetes.tar kubernetes:v1.19.8`,
 	Args: cobra.ExactArgs(1),
 	RunE: func(cmd *cobra.Command, args []string) error {
-		imageTar := save.ImageTar
-		if imageTar == "" {
-			return fmt.Errorf("imagetar cannot be empty")
-		}
-
-		dir, file := filepath.Split(imageTar)
-		if dir == "" {
-			dir = "."
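	// The tar-name defaulting above is dropped: the engine-based code below
	// delegates saving entirely to buildah. A sketch of the programmatic
	// equivalent of `sealer save -o kubernetes.tar kubernetes:v1.19.8`, using
	// the option names wired up in init() below (illustrative only):
	//
	//	engine, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{})
	//	if err != nil {
	//		return err
	//	}
	//	return engine.Save(&common.SaveOptions{
	//		ImageNameOrID: "kubernetes:v1.19.8",
	//		Format:        buildah.V2s2Archive, // docker-archive; oci-archive, oci-dir, docker-dir also accepted
	//		Output:        "kubernetes.tar",
	//	})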
-		}
-		if file == "" {
-			file = fmt.Sprintf("%s.tar", args[0])
-		}
-		imageTar = filepath.Join(dir, file)
-		// only file path like "/tmp" will lose add image tar name,make sure imageTar with full file name.
-		if filepath.Ext(imageTar) != ".tar" {
-			imageTar = filepath.Join(imageTar, fmt.Sprintf("%s.tar", args[0]))
-		}
-
-		ifs, err := image.NewImageFileService()
-		if err != nil {
-			return err
-		}
-		named, err := reference.ParseToNamed(args[0])
+		engine, err := imageengine.NewImageEngine(common.EngineGlobalConfigurations{})
 		if err != nil {
 			return err
 		}
-
-		var targetPlatforms []*v1.Platform
-		if save.Platform != "" {
-			tp, err := platform.ParsePlatforms(save.Platform)
-			if err != nil {
-				return err
-			}
-			targetPlatforms = tp
-		}
-
-		if err = ifs.Save(named.Raw(), imageTar, targetPlatforms); err != nil {
-			return fmt.Errorf("failed to save image %s: %v", args[0], err)
-		}
-		logrus.Infof("save image %s to %s successfully", args[0], imageTar)
-		return nil
+		saveOpts.ImageNameOrID = args[0]
+		return engine.Save(saveOpts)
 	},
 }

 func init() {
-	rootCmd.AddCommand(saveCmd)
-	save = saveFlag{}
-	saveCmd.Flags().StringVarP(&save.ImageTar, "output", "o", "", "write the image to a file")
-	saveCmd.Flags().StringVar(&save.Platform, "platform", "", "set ClusterImage platform")
+	saveOpts = &common.SaveOptions{}
+	flags := saveCmd.Flags()
+	flags.StringVar(&saveOpts.Format, "format", buildah.V2s2Archive, "Save image to oci-archive, oci-dir (directory with oci manifest type), docker-archive, docker-dir (directory with v2s2 manifest type)")
+	flags.StringVarP(&saveOpts.Output, "output", "o", "", "Write image to a specified file")
+	flags.BoolVarP(&saveOpts.Quiet, "quiet", "q", false, "Suppress the output")
+	flags.BoolVar(&saveOpts.Compress, "compress", false, "Compress tarball image layers when saving to a directory using the 'dir' transport. (default is same compression type as source)")
 	if err := saveCmd.MarkFlagRequired("output"); err != nil {
 		logrus.Errorf("failed to init flag: %v", err)
 		os.Exit(1)
 	}
+
+	rootCmd.AddCommand(saveCmd)
 }
diff --git a/cmd/sealer/cmd/search.go b/cmd/sealer/cmd/search.go
index 4f30f33a374..f08d8b552b1 100644
--- a/cmd/sealer/cmd/search.go
+++ b/cmd/sealer/cmd/search.go
@@ -27,6 +27,10 @@ import (
 	"github.com/spf13/cobra"
 )

+const (
+	imageName = "IMAGE NAME"
+)
+
 // searchCmd represents the search command
 var searchCmd = &cobra.Command{
 	Use: "search",
diff --git a/cmd/sealer/main.go b/cmd/sealer/main.go
index 76ca24acee6..398b544f758 100644
--- a/cmd/sealer/main.go
+++ b/cmd/sealer/main.go
@@ -15,11 +15,16 @@ package main

 import (
+	"github.com/containers/buildah"
 	"github.com/sealerio/sealer/cmd/sealer/boot"
 	"github.com/sealerio/sealer/cmd/sealer/cmd"
 )

 func main() {
+	if buildah.InitReexec() {
+		return
+	}
+
 	if err := boot.OnBoot(); err != nil {
 		panic(err)
 	}
diff --git a/docs/design/build_implementation.md b/docs/design/build_implementation.md
new file mode 100644
index 00000000000..6ff8a8c1aa2
--- /dev/null
+++ b/docs/design/build_implementation.md
@@ -0,0 +1,15 @@
+# Implementation of sealer build
+
+## Abstract
+
+Generally, an image generated by sealer build is no different from a container image: it is compatible with the OCI spec. We call it a cluster image.
+On top of the usual container image build, sealer performs a few extra operations, such as (1) automatically adding a layer that stores container
+images, and (2) saving cluster image information into the annotations of the OCI v1 image manifest. Sealer does not implement
+the concrete build procedure itself; it builds on top of mature tooling (currently `buildah`). The following sections introduce how
+sealer build is implemented.
+
+## Implementation
+
+### Engine
+
+### Store Container Images
\ No newline at end of file
diff --git a/go.mod b/go.mod
index f4896bd5f82..caefb36982a 100644
--- a/go.mod
+++ b/go.mod
@@ -6,44 +6,52 @@ require (
 	github.com/Masterminds/semver/v3 v3.1.1
 	github.com/aliyun/alibaba-cloud-sdk-go v1.61.985
 	github.com/cavaliergopher/grab/v3 v3.0.1
+	github.com/containers/buildah v1.24.5
+	github.com/containers/common v0.47.5
+	github.com/containers/image/v5 v5.19.3
+	github.com/containers/ocicrypt v1.1.4
+	github.com/containers/storage v1.38.5
 	github.com/distribution/distribution/v3 v3.0.0-20211125133600-cc4627fc6e5f
 	github.com/docker/cli v20.10.7+incompatible
-	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v20.10.7+incompatible
-	github.com/docker/go-connections v0.4.0
-	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
+	github.com/docker/distribution v2.8.1+incompatible
+	github.com/docker/docker v20.10.17+incompatible
+	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
+	github.com/docker/go-units v0.4.0
 	github.com/go-git/go-git/v5 v5.4.2
-	github.com/google/uuid v1.2.0
+	github.com/google/uuid v1.3.0
+	github.com/hashicorp/go-multierror v1.1.1
 	github.com/imdario/mergo v0.3.12
 	github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible
 	github.com/lestrrat-go/strftime v1.0.6 // indirect
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/moby/buildkit v0.9.3
-	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635
+	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
 	github.com/olekukonko/tablewriter v0.0.4
-	github.com/onsi/ginkgo v1.16.2
-	github.com/onsi/gomega v1.12.0
+	github.com/onsi/ginkgo v1.16.5
+	github.com/onsi/gomega v1.19.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.2
+	github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/sftp v1.13.0
 	github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5
 	github.com/sealyun/lvscare v1.1.2-alpha.2
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.1.3
-	github.com/spf13/viper v1.7.0
-	github.com/tonistiigi/fsutil v0.0.0-20211208191308-f95797418e48
-	github.com/vbatts/tar-split v0.11.1
-	github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
+	github.com/spf13/cobra v1.4.0
+	github.com/spf13/pflag v1.0.5
+	github.com/spf13/viper v1.10.0
+	github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf
+	github.com/vbatts/tar-split v0.11.2
+	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
 	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
 	go.etcd.io/etcd/client/v3 v3.5.0
 	go.uber.org/zap v1.17.0
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
-	golang.org/x/net v0.0.0-20210510120150-4163338589ed
+	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
+	golang.org/x/net v0.0.0-20220225172249-27dd8689420f
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
+	golang.org/x/term
v0.0.0-20210927222741-03fcf44c2211 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b gotest.tools v2.2.0+incompatible @@ -54,9 +62,7 @@ require ( k8s.io/client-go v0.21.0 k8s.io/kube-proxy v0.21.0 k8s.io/kubelet v0.21.0 - k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 + k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b sigs.k8s.io/controller-runtime v0.8.1 sigs.k8s.io/yaml v1.2.0 ) - -replace github.com/docker/docker => github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible diff --git a/go.sum b/go.sum index e6ea0b3c42b..48f60cf11c3 100644 --- a/go.sum +++ b/go.sum @@ -19,14 +19,32 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -35,6 +53,7 @@ cloud.google.com/go/storage 
v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= @@ -44,6 +63,8 @@ contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcig dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= @@ -58,8 +79,9 @@ github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -101,10 +123,13 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= @@ -132,8 +157,10 @@ github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -142,7 +169,12 @@ github.com/Microsoft/hcsshim v0.8.10/go.mod h1:g5uw8EV2mAlzqe94tfNBNdr89fnbD/n3H github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= @@ -150,8 +182,10 @@ 
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3 h1:XcF0cTDJeiuZ5NU8w7WUDge0HRwwNRmxj/GGk6KSA6g=
+github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
@@ -163,7 +197,11 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
@@ -176,6 +214,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.985 h1:ETObK47vrphrw9wC+2/SinHL59YQtle2eBtSRucLTRQ=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.985/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
@@ -192,7 +231,9 @@ github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3st
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
@@ -226,7 +267,9 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U=
github.com/bombsimon/wsl/v2 v2.2.0/go.mod h1:Azh8c3XGEJl9LyX0/sFC+CKMc7Ssgua0g+6abzXN4Pg=
@@ -236,6 +279,7 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
@@ -249,25 +293,42 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
@@ -284,6 +345,7 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@@ -291,6 +353,7 @@ github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4g
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -307,9 +370,12 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4=
+github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -340,8 +406,13 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
+github.com/containerd/stargz-snapshotter v0.6.4 h1:mox1Ozl/LicA5j0O5Xk9Q8z+nOQQLnClarhxokyw9hI=
github.com/containerd/stargz-snapshotter v0.6.4/go.mod h1:1t0SF1gAHJhCSftWKDLVitvfF3c2qhL5hymG7C50wto=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
+github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.0 h1:t0IW5kOmY7AXDAWRUs2uVzDhijAUOAYVr/dyRhOQvBg=
+github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -360,17 +431,39 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1 h1:wwCfYbTCj5FC0EJgyzyjTXmqysOiJE9r712Z+2KVZAk=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containers/buildah v1.24.5 h1:AO41lnG8HXJW1QAMl+8v9N1hQ30LUm/Cvm7HXErrWZo=
+github.com/containers/buildah v1.24.5/go.mod h1:7eNXqJUGYGSWx77o020hTNjSu0Y8h2ipCO/Ms72I/xQ=
+github.com/containers/common v0.47.5 h1:Qm9o+wVPO9sbggTKubN3xYMtPRaPv7dmcrJQgongHHw=
+github.com/containers/common v0.47.5/go.mod h1:HgX0mFXyB0Tbe2REEIp9x9CxET6iSzmHfwR6S/t2LZc=
+github.com/containers/image/v5 v5.19.1/go.mod h1:ewoo3u+TpJvGmsz64XgzbyTHwHtM94q7mgK/pX+v2SE=
+github.com/containers/image/v5 v5.19.3 h1:XljZWtaMNJOh9fE8IQyir+bw+7PhxnIwc2/kDF9GPoQ=
+github.com/containers/image/v5 v5.19.3/go.mod h1:9nqGCbwNy4nrjykPRuxJL0aJG5F5CNfv4Nk4U1JezE4=
+github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
+github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.4 h1:V0ktirShnF1iJ2ithuoYE4eNAOSL3af1PlTiykv3PLQ=
+github.com/containers/ocicrypt v1.1.4/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
+github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
+github.com/containers/storage v1.38.3/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
+github.com/containers/storage v1.38.5 h1:gymwFRLMvFAqiGcDJ0+OxaN6ZqQkv3FQSbhN7a2JBW0=
+github.com/containers/storage v1.38.5/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
@@ -390,18 +483,21 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -416,6 +512,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
+github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/distribution/v3 v3.0.0-20211125133600-cc4627fc6e5f h1:oK2N+CZYddTrgbDNs+6mzJ5dRiy8agjWRApYv2Pfryo=
github.com/distribution/distribution/v3 v3.0.0-20211125133600-cc4627fc6e5f/go.mod h1:UfCu3YXJJCI+IdnqGgYP82dk2+Joxmv+mUTVBES6wac=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
@@ -430,13 +528,30 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible h1:gUm2bOLaaAPkV1z4te7ahrXTWyA+PNNWroISSOttSgU=
-github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.3-0.20210609071616-4c2ec79bf2a8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
@@ -445,6 +560,8 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f h1:jC/ZXgYdzCUuKFkKGNiekhnIkGfUrdelEqvg4Miv440=
github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
@@ -469,9 +586,14 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
@@ -480,6 +602,7 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
@@ -491,12 +614,17 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
+github.com/fsouza/go-dockerclient v1.7.8 h1:Tp7IYXyvmZsmrCDffMENOv6l2xN2Aw17EThY8Gokq48=
+github.com/fsouza/go-dockerclient v1.7.8/go.mod h1:7cvopLQDrW3dJ5mcx2LzWMBfmpv/fq7MZUEPcQlAtLw=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
@@ -616,9 +744,12 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg=
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
@@ -644,8 +775,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -653,6 +785,9 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -669,6 +804,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -708,14 +844,19 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE=
github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
+github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
+github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
@@ -726,12 +867,24 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@@ -740,14 +893,17 @@ github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -755,7 +911,6 @@ github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwu
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/goreleaser/goreleaser v0.136.0/go.mod h1:wiKrPUeSNh6Wu8nUHxZydSOVQ/OZvOaO7DTtFqie904=
github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w=
@@ -766,6 +921,7 @@ github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -794,21 +950,32 @@ github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy
github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@@ -824,14 +991,22 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -843,8 +1018,10 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
+github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg=
github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -852,6 +1029,8 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s=
+github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
+github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -873,17 +1052,19 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kevinburke/ssh_config v1.1.0 h1:pH/t1WS9NzT8go394IqZeJTMHVm6Cr6ZJ6AQ+mdNo/o=
+github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -893,8 +1074,13 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
+github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -929,13 +1115,17 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -943,6 +1133,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
@@ -954,6 +1146,8 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
@@ -961,16 +1155,22 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
+github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
@@ -982,9 +1182,15 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.1.1 h1:Bp6x9R1Wn16SIz3OfeDr0b7RnCG2OB66Y7PQyC/cvq4=
github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4=
@@ -999,8 +1205,9 @@ github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1D
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA=
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
+github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
@@ -1015,24 +1222,29 @@ github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
+github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
@@ -1078,9 +1290,14 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134=
-github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1092,8 +1309,11 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8=
-github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1103,8 +1323,9 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 h1:g47eG1u/gw0JB7mZ88TcHKCmsy7sWUNZD8ZS9Jhi0O8=
+github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1113,17 +1334,28 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU= +github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= +github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/openshift/imagebuilder v1.2.2 h1:++jWWMkTVJKP2MIjTPaTk2MqwWIOYYlDaQbZyLlLBh0= +github.com/openshift/imagebuilder v1.2.2/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -1136,14 +1368,18 @@ github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= +github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.1 h1:a6qW1EVNZWH9WGI6CsYdD8WAylkoXBS5yv0XHlh17Tc= github.com/pelletier/go-toml v1.9.1/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1158,13 +1394,17 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.0 h1:Riw6pgOKK41foc1I1Uu03CjvbLZDXeGpInycM4shXoI= github.com/pkg/sftp v1.13.0/go.mod h1:41g+FIPlQUTDCveupEmEA65IoiQFrtgCeDopC4ajGIM= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= +github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1173,9 +1413,11 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1192,6 +1434,7 @@ 
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -1216,6 +1459,8 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5/go.mod h1:GEXHk5HgEKCvEIIrSpFI3ozzG5xOKA2DVlEX/gGnewM= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -1227,28 +1472,38 @@ github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= 
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sealyun/lvscare v1.1.2-alpha.2 h1:SlnEAXOPn5gC6l8tgvbn9fApyJaQ0ecQHSV3cEfvQYY= github.com/sealyun/lvscare v1.1.2-alpha.2/go.mod h1:FtOEdsXuYtw9Jwd/Jct25K+PcpUFSDemvF4VgNygjj0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= @@ -1258,7 +1513,6 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1270,11 +1524,9 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= 
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -1283,19 +1535,24 @@ github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34c github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1308,8 +1565,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= -github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1331,11 +1590,17 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo= +github.com/sylabs/sif/v2 v2.3.1 h1:NHoc/rZpnOS05etmT+j8IJOZP2Cc8zHHG8rKSVosvZs= +github.com/sylabs/sif/v2 v2.3.1/go.mod h1:NnvveH62GiibimL00MrI6YYcZfb7DnZMcRo/40giY+0= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= @@ -1352,40 +1617,47 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod 
h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo= +github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf h1:L0ixhsTk9j+dVnIvF6aiVCxPiaFvwTOyJxqimPq44p8= github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk= -github.com/tonistiigi/fsutil v0.0.0-20211208191308-f95797418e48 h1:w/Jl6oDYyxHFLxV/PGzw7yUaoLMDnmUXYtm/mS8CUJ4= -github.com/tonistiigi/fsutil v0.0.0-20211208191308-f95797418e48/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA= github.com/tonistiigi/go-actions-cache v0.0.0-20211002214948-4d48f2ff622a/go.mod h1:YiIBjH5gP7mao3t0dBrNNBGuKYdeJmcAJjYLXr43k6A= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= -github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vbauerster/mpb/v7 v7.3.2 h1:tCuxMy8G9cLdjb61b6wO7I1vRT/LyMEzRbr3xCC0JPU= +github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA= 
github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= @@ -1395,10 +1667,12 @@ github.com/wonderivan/logger v1.0.0 h1:Z6Nz+3SNcizolx3ARH11axdD4DXjFpb2J+ziGUVlv github.com/wonderivan/logger v1.0.0/go.mod h1:NObMfQ3WOLKfYEZuGeZQfuQfSPE5+QNgRddVMzsAT/k= github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xanzy/ssh-agent v0.3.1 h1:AmzO1SSWxw73zxFZPRwaMN1MohDw8UyHnmuxyceTEGo= +github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= @@ -1423,18 +1697,24 @@ github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wK go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt 
v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= +go.etcd.io/etcd/api/v3 v3.5.1 h1:v28cktvBq+7vGyJXF8G+rWJmj+1XUmMtqcLnH8hDocM= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1 h1:XIQcHCFSG53bJETYeRJtIxdLv2EWRGxcfzR8lSnTH4E= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -1446,6 +1726,10 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.21.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.21.0/go.mod h1:a9cocRplhIBkUAJmak+BPDx+LVL7cTmqUPB0uBcTA4k= @@ -1460,6 +1744,7 @@ go.opentelemetry.io/otel/metric v0.21.0/go.mod h1:JWCt1bjivC4iCrz/aCrM1GSw+ZcvY4 go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= go.opentelemetry.io/otel/sdk v1.0.0-RC1/go.mod h1:kj6yPn7Pgt5ByRuwesbaWcRLA+V7BSDg3Hf8xRvsvf8= 
go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= @@ -1503,6 +1788,8 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1516,8 +1803,12 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1544,6 +1835,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint 
v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1554,7 +1846,10 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1598,21 +1893,34 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod 
h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1622,8 +1930,20 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1675,9 +1995,11 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1702,18 +2024,22 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1721,11 +2047,16 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1734,14 +2065,36 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1750,8 +2103,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1794,6 +2148,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1834,13 +2189,27 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1874,6 +2243,25 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1881,8 +2269,9 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1916,11 +2305,48 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= 
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -1941,12 +2367,24 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc 
v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1981,12 +2419,14 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.56.0 h1:DPMeDvGTM54DXbPkVIZsp19fp/I2K7zwA/itHYHKo8Y= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -2024,6 +2464,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= @@ -2064,6 +2505,7 @@ k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= @@ -2082,6 +2524,7 @@ k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdx k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2111,8 +2554,9 @@ k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/hack/build.sh b/hack/build.sh index 4c9bd5e51f9..105958b2c45 100755 --- a/hack/build.sh +++ b/hack/build.sh @@ -27,6 +27,14 @@ SEALER_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd -P)" export THIS_PLATFORM_BIN="${SEALER_ROOT}/_output/bin" export THIS_PLATFORM_ASSETS="${SEALER_ROOT}/_output/assets" +# fix containers dependency issue +# https://github.com/containers/image/pull/271/files +GO_BUILD_FLAGS="" + +if [ "$(uname)" == "Darwin" ]; then + GO_BUILD_FLAGS="-tags containers_image_openpgp" +fi + debug() { timestamp=$(date +"[%m%d %H:%M:%S]") echo "[debug] ${timestamp} ${1-}" >&2 @@ -99,7 +107,7 @@ build_binaries() { tarFile="${GIT_VERSION}-${1-}-${2-}.tar.gz" debug "!!! build $osarch sealer" - GOOS=${1-} GOARCH=${2-} go build -o $THIS_PLATFORM_BIN/sealer/$osarch/sealer -mod vendor -ldflags "$goldflags" $SEALER_ROOT/cmd/sealer/main.go + GOOS=${1-} GOARCH=${2-} go build $GO_BUILD_FLAGS -o $THIS_PLATFORM_BIN/sealer/$osarch/sealer -mod vendor -ldflags "$goldflags" $SEALER_ROOT/cmd/sealer/main.go check $? "build $osarch sealer" debug "output bin: $THIS_PLATFORM_BIN/sealer/$osarch/sealer" cd ${SEALER_ROOT}/_output/bin/sealer/$osarch/ diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go new file mode 100644 index 00000000000..8940e93d15c --- /dev/null +++ b/pkg/auth/auth.go @@ -0,0 +1,29 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "path/filepath" + + "github.com/mitchellh/go-homedir" +) + +func GetDefaultAuthFilePath() string { + dir, err := homedir.Dir() + if err != nil { + dir = "/root" + } + return filepath.Join(dir, ".sealer/auth.json") +} diff --git a/pkg/filesystem/clusterimage/clusterimage.go b/pkg/filesystem/clusterimage/clusterimage.go index 429188bb9d8..556daf2ed9b 100644 --- a/pkg/filesystem/clusterimage/clusterimage.go +++ b/pkg/filesystem/clusterimage/clusterimage.go @@ -18,17 +18,19 @@ import ( "fmt" "io/ioutil" "net" + "os" "path/filepath" "time" + "github.com/sealerio/sealer/pkg/imageengine" + common2 "github.com/sealerio/sealer/pkg/imageengine/common" + "github.com/sealerio/sealer/utils/os/fs" osi "github.com/sealerio/sealer/utils/os" "github.com/sealerio/sealer/common" "github.com/sealerio/sealer/pkg/env" - "github.com/sealerio/sealer/pkg/image" - "github.com/sealerio/sealer/pkg/image/store" v2 "github.com/sealerio/sealer/types/api/v2" "github.com/sealerio/sealer/utils" "github.com/sealerio/sealer/utils/mount" @@ -42,8 +44,8 @@ type Interface interface { } type mounter struct { - imageStore store.ImageStore - fs fs.Interface + imageEngine imageengine.Interface + fs fs.Interface } func (m *mounter) MountImage(cluster *v2.Cluster) error { @@ -84,17 +86,18 @@ func (m *mounter) mountImage(cluster *v2.Cluster) error { var ( mountDirs = make(map[string]bool) driver = mount.NewMountDriver() + image = cluster.Spec.Image err error ) clusterPlatform, err := ssh.GetClusterPlatform(cluster) if err != nil { return err } + clusterPlatform["local"] = *platform.GetDefaultPlatform() for _, v := range clusterPlatform { pfm := v mountDir := platform.GetMountClusterImagePlatformDir(cluster.Name, pfm) - upperDir := filepath.Join(mountDir, 
"upper") if mountDirs[mountDir] { continue } @@ -110,21 +113,22 @@ func (m *mounter) mountImage(cluster *v2.Cluster) error { return fmt.Errorf("failed to clean %s: %v", mountDir, err) } } - Image, err := m.imageStore.GetByName(cluster.Spec.Image, &pfm) + + err = os.MkdirAll(filepath.Dir(mountDir), 0750) if err != nil { return err } - layers, err := image.GetImageLayerDirs(Image) + + // build oci image rootfs under graphroot + // and the rootfs will be linked to sealer rootfs. + _, err = m.imageEngine.BuildRootfs(&common2.BuildRootfsOptions{ + ImageNameOrID: image, + DestDir: mountDir, + }) if err != nil { - return fmt.Errorf("failed to get layers: %v", err) + return err } - if err = m.fs.MkdirAll(upperDir); err != nil { - return fmt.Errorf("failed to create upperdir: %s", err) - } - if err = driver.Mount(mountDir, upperDir, layers...); err != nil { - return fmt.Errorf("failed to mount files: %v", err) - } // use env list to render image mount dir: etc,charts,manifests. err = renderENV(mountDir, cluster.GetAllIPList(), env.NewEnvProcessor(cluster)) if err != nil { @@ -153,9 +157,9 @@ func renderENV(imageMountDir string, ipList []net.IP, p env.Interface) error { return nil } -func NewClusterImageMounter(is store.ImageStore) (Interface, error) { +func NewClusterImageMounter(ie imageengine.Interface) (Interface, error) { return &mounter{ - imageStore: is, - fs: fs.NewFilesystem(), + imageEngine: ie, + fs: fs.NewFilesystem(), }, nil } diff --git a/pkg/filesystem/filesystem.go b/pkg/filesystem/filesystem.go index e9510b791a4..f679b96e823 100644 --- a/pkg/filesystem/filesystem.go +++ b/pkg/filesystem/filesystem.go @@ -17,19 +17,16 @@ package filesystem import ( "fmt" + "github.com/sealerio/sealer/pkg/imageengine" + "github.com/sealerio/sealer/pkg/filesystem/cloudfilesystem" "github.com/sealerio/sealer/pkg/filesystem/clusterimage" - "github.com/sealerio/sealer/pkg/image/store" "github.com/sealerio/sealer/pkg/runtime" ) -// NewCloudClusterMounter :mount and unmount ClusterImage. -func NewClusterImageMounter() (clusterimage.Interface, error) { - is, err := store.NewDefaultImageStore() - if err != nil { - return nil, err - } - return clusterimage.NewClusterImageMounter(is) +// NewClusterImageMounter mount and unmount ClusterImage. +func NewClusterImageMounter(engine imageengine.Interface) (clusterimage.Interface, error) { + return clusterimage.NewClusterImageMounter(engine) } // NewFilesystem :according to the Metadata file content to determine what kind of Filesystem will be load. 
diff --git a/pkg/guest/guest.go b/pkg/guest/guest.go index 050016d885a..82e2f9da059 100644 --- a/pkg/guest/guest.go +++ b/pkg/guest/guest.go @@ -17,16 +17,16 @@ package guest import ( "fmt" - "github.com/sealerio/sealer/utils/maps" + "github.com/sealerio/sealer/pkg/imageengine" + common2 "github.com/sealerio/sealer/pkg/imageengine/common" + "github.com/sealerio/sealer/utils/strings" "github.com/moby/buildkit/frontend/dockerfile/shell" "github.com/sealerio/sealer/common" - "github.com/sealerio/sealer/pkg/image/store" v1 "github.com/sealerio/sealer/types/api/v1" v2 "github.com/sealerio/sealer/types/api/v2" - "github.com/sealerio/sealer/utils/platform" "github.com/sealerio/sealer/utils/ssh" ) @@ -36,30 +36,31 @@ type Interface interface { } type Default struct { - imageStore store.ImageStore + ImageEngine imageengine.Interface } func NewGuestManager() (Interface, error) { - is, err := store.NewDefaultImageStore() + ie, err := imageengine.NewImageEngine(common2.EngineGlobalConfigurations{}) if err != nil { return nil, err } - return &Default{imageStore: is}, nil + return &Default{ImageEngine: ie}, nil } func (d *Default) Apply(cluster *v2.Cluster) error { var ( clusterRootfs = common.DefaultTheClusterRootfsDir(cluster.Name) ex = shell.NewLex('\\') + image = cluster.Spec.Image ) - image, err := d.imageStore.GetByName(cluster.Spec.Image, platform.GetDefaultPlatform()) + extension, err := d.ImageEngine.GetSealerImageExtension(&common2.GetImageAnnoOptions{ImageNameOrID: image}) if err != nil { return fmt.Errorf("failed to get ClusterImage: %s", err) } - cmdArgs := d.getGuestCmdArg(cluster, image) - cmd := d.getGuestCmd(cluster, image) + cmdArgs := d.getGuestCmdArg(cluster.Spec.CMDArgs, extension) + cmd := d.getGuestCmd(cluster.Spec.CMD, extension) sshClient, err := ssh.NewStdoutSSHClient(cluster.GetMaster0IP(), cluster) if err != nil { return err @@ -82,11 +83,11 @@ func (d *Default) Apply(cluster *v2.Cluster) error { return nil } -func (d *Default) getGuestCmd(cluster *v2.Cluster, image *v1.Image) []string { +func (d *Default) getGuestCmd(CmdFromClusterFile []string, extension v1.ImageExtension) []string { var ( - cmd = image.Spec.ImageConfig.Cmd.Parent - clusterCmd = cluster.Spec.CMD - imageType = image.Spec.ImageConfig.ImageType + cmd = extension.CmdSet + clusterCmd = CmdFromClusterFile + imageType = extension.ImageType ) // application image: if cluster cmd not nil, use cluster cmd directly @@ -94,29 +95,29 @@ func (d *Default) getGuestCmd(cluster *v2.Cluster, image *v1.Image) []string { if len(clusterCmd) != 0 { return clusterCmd } - return image.Spec.ImageConfig.Cmd.Current + return clusterCmd } // normal image: if cluster cmd not nil, use cluster cmd as current cmd if len(clusterCmd) != 0 { return strings.Merge(cmd, clusterCmd) } - return strings.Merge(cmd, image.Spec.ImageConfig.Cmd.Current) + return strings.Merge(cmd, clusterCmd) } -func (d *Default) getGuestCmdArg(cluster *v2.Cluster, image *v1.Image) map[string]string { +func (d *Default) getGuestCmdArg(clusterCmdsArgs []string, extension v1.ImageExtension) map[string]string { var ( base map[string]string - clusterArgs = cluster.Spec.CMDArgs - imageType = image.Spec.ImageConfig.ImageType + clusterArgs = clusterCmdsArgs + //imageType = extension.ImageType ) - if imageType == common.AppImage { - base = image.Spec.ImageConfig.Args.Current - } else { - base = maps.Merge(image.Spec.ImageConfig.Args.Parent, image.Spec.ImageConfig.Args.Current) - } - + //if imageType == common.AppImage { + // base = 
extension.ArgSet + //} else { + // base = maps.Merge(image.Spec.ImageConfig.Args.Parent, image.Spec.ImageConfig.Args.Current) + //} + base = extension.ArgSet for k, v := range strings.ConvertToMap(clusterArgs) { base[k] = v } diff --git a/pkg/image/distributionutil/login.go b/pkg/image/distributionutil/login.go index 2b01c7bd53f..273fe21878b 100644 --- a/pkg/image/distributionutil/login.go +++ b/pkg/image/distributionutil/login.go @@ -84,7 +84,7 @@ func Login(ctx context.Context, authConfig *types.AuthConfig) error { } func authHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { - challengeManager, err := dockerRegistry.PingV2Registry(endpoint, authTransport) + challengeManager, _, err := dockerRegistry.PingV2Registry(endpoint, authTransport) if err != nil { return nil, err } diff --git a/pkg/image/distributionutil/repository.go b/pkg/image/distributionutil/repository.go index 75b60f27013..835ab5fc387 100644 --- a/pkg/image/distributionutil/repository.go +++ b/pkg/image/distributionutil/repository.go @@ -74,7 +74,7 @@ func NewRepository(ctx context.Context, authConfig types.AuthConfig, repoName st modifiers := dockerRegistry.Headers(dockerversion.DockerUserAgent(ctx), nil) authTransport := dockerTransport.NewTransport(base, modifiers...) - challengeManager, err := dockerRegistry.PingV2Registry(rurl, authTransport) + challengeManager, _, err := dockerRegistry.PingV2Registry(rurl, authTransport) if err != nil { return nil, err } diff --git a/pkg/image/distributionutil/utils.go b/pkg/image/distributionutil/utils.go index ec901f29b81..39b06f91b82 100644 --- a/pkg/image/distributionutil/utils.go +++ b/pkg/image/distributionutil/utils.go @@ -31,7 +31,6 @@ func PlatformSpecFromOCI(p *v1.Platform) *manifestlist.PlatformSpec { return &manifestlist.PlatformSpec{ Architecture: p.Architecture, OS: p.OS, - OSVersion: p.OSVersion, Variant: p.Variant, } } diff --git a/pkg/imageengine/buildah/build.go b/pkg/imageengine/buildah/build.go new file mode 100644 index 00000000000..8e5c1a55c4b --- /dev/null +++ b/pkg/imageengine/buildah/build.go @@ -0,0 +1,370 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package buildah + +import ( + "context" + "fmt" + + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/imagebuildah" + buildahcli "github.com/containers/buildah/pkg/cli" + "github.com/containers/buildah/pkg/parse" + buildahutil "github.com/containers/buildah/pkg/util" + "github.com/containers/buildah/util" + "github.com/containers/common/pkg/auth" + "github.com/pkg/errors" + "github.com/sealerio/sealer/pkg/imageengine/common" + "github.com/sirupsen/logrus" +) + +type buildFlagsWrapper struct { + *buildahcli.BudResults + *buildahcli.LayerResults + *buildahcli.FromAndBudResults + *buildahcli.NameSpaceResults + *buildahcli.UserNSResults +} + +func (engine *Engine) Build(opts *common.BuildOptions) (string, error) { + // The following block initializes buildah's default build options. + // migrateFlags2Wrapper is then called to override them based on the sealer build options. + wrapper := &buildFlagsWrapper{ + BudResults: &buildahcli.BudResults{}, + LayerResults: &buildahcli.LayerResults{}, + FromAndBudResults: &buildahcli.FromAndBudResults{}, + NameSpaceResults: &buildahcli.NameSpaceResults{}, + UserNSResults: &buildahcli.UserNSResults{}, + } + + flags := engine.Flags() + buildFlags := buildahcli.GetBudFlags(wrapper.BudResults) + buildFlags.StringVar(&wrapper.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.") + + layerFlags := buildahcli.GetLayerFlags(wrapper.LayerResults) + fromAndBudFlags, err := buildahcli.GetFromAndBudFlags(wrapper.FromAndBudResults, wrapper.UserNSResults, wrapper.NameSpaceResults) + if err != nil { + return "", fmt.Errorf("failed to set up From and Build flags: %v", err) + } + + flags.AddFlagSet(&buildFlags) + flags.AddFlagSet(&layerFlags) + flags.AddFlagSet(&fromAndBudFlags) + flags.SetNormalizeFunc(buildahcli.AliasFlags) + + err = engine.migrateFlags2Wrapper(opts, wrapper) + if err != nil { + return "", err + } + + options, kubefiles, err := engine.wrapper2Options(opts, wrapper) + if err != nil { + return "", err + } + + return engine.build(getContext(), kubefiles, options) +} + +func (engine *Engine) wrapper2Options(opts *common.BuildOptions, wrapper *buildFlagsWrapper) (define.BuildOptions, []string, error) { + output := "" + cleanTmpFile := false + tags := []string{} + if engine.Flag("tag").Changed { + tags = wrapper.Tag + if len(tags) > 0 { + output = tags[0] + tags = tags[1:] + } + if engine.Flag("manifest").Changed { + for _, tag := range tags { + if tag == wrapper.Manifest { + return define.BuildOptions{}, []string{}, errors.New("the same name must not be specified for both '--tag' and '--manifest'") + } + } + } + } + + if err := auth.CheckAuthFile(wrapper.Authfile); err != nil { + return define.BuildOptions{}, []string{}, err + } + wrapper.Authfile, cleanTmpFile = + buildahutil.MirrorToTempFileIfPathIsDescriptor(wrapper.Authfile) + if cleanTmpFile { + defer os.Remove(wrapper.Authfile) + } + + // Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always + // --pull-always and --pull-never. The --pull-never and --pull-always options + // will not be documented.
+ pullPolicy := define.PullIfMissing + if strings.EqualFold(strings.TrimSpace(wrapper.Pull), "true") { + pullPolicy = define.PullIfNewer + } + if wrapper.PullAlways || strings.EqualFold(strings.TrimSpace(wrapper.Pull), "always") { + pullPolicy = define.PullAlways + } + if wrapper.PullNever || strings.EqualFold(strings.TrimSpace(wrapper.Pull), "never") { + pullPolicy = define.PullNever + } + logrus.Debugf("Pull Policy for pull [%v]", pullPolicy) + + format, err := getFormat(wrapper.Format) + if err != nil { + return define.BuildOptions{}, []string{}, err + } + + layers := buildahcli.UseLayers() + if engine.Flag("layers").Changed { + layers = wrapper.Layers + } + + contextDir := opts.ContextDir + + // Nothing provided, we assume the current working directory as build + // context + if len(contextDir) == 0 { + contextDir, err = os.Getwd() + if err != nil { + return define.BuildOptions{}, []string{}, errors.Wrapf(err, "unable to choose current working directory as build context") + } + } else { + // It was local. Use it as is. + contextDir, err = filepath.Abs(contextDir) + if err != nil { + return define.BuildOptions{}, []string{}, errors.Wrapf(err, "error determining path to directory") + } + } + + kubefiles := getKubefiles(wrapper.File) + if len(kubefiles) == 0 { + kubefile, err := DiscoverKubefile(contextDir) + if err != nil { + return define.BuildOptions{}, []string{}, err + } + kubefiles = append(kubefiles, kubefile) + } + + contextDir, err = filepath.EvalSymlinks(contextDir) + if err != nil { + return define.BuildOptions{}, []string{}, errors.Wrapf(err, "error evaluating symlinks in build context path") + } + + var stdin io.Reader + if wrapper.Stdin { + stdin = os.Stdin + } + var stdout, stderr, reporter = os.Stdout, os.Stderr, os.Stderr + if engine.Flag("logfile").Changed { + f, err := os.OpenFile(wrapper.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + if err != nil { + return define.BuildOptions{}, []string{}, errors.Errorf("error opening logfile %q: %v", wrapper.Logfile, err) + } + defer func() { + // this will incur GoSec warning + _ = f.Close() + }() + logrus.SetOutput(f) + stdout = f + stderr = f + reporter = f + } + + systemContext, err := parse.SystemContextFromOptions(engine.Command) + if err != nil { + return define.BuildOptions{}, []string{}, errors.Wrapf(err, "error building system context") + } + + isolation, err := defaultIsolationOption() + if err != nil { + return define.BuildOptions{}, []string{}, err + } + + runtimeFlags := []string{} + for _, arg := range wrapper.RuntimeFlags { + runtimeFlags = append(runtimeFlags, "--"+arg) + } + + commonOpts, err := parse.CommonBuildOptions(engine.Command) + if err != nil { + return define.BuildOptions{}, []string{}, err + } + + namespaceOptions, networkPolicy := defaultNamespaceOptions() + + usernsOption, idmappingOptions, err := parse.IDMappingOptions(engine.Command, isolation) + if err != nil { + return define.BuildOptions{}, []string{}, errors.Wrapf(err, "error parsing ID mapping options") + } + namespaceOptions.AddOrReplace(usernsOption...) 
+ + platforms, err := parse.PlatformsFromOptions(engine.Command) + if err != nil { + return define.BuildOptions{}, []string{}, err + } + + var excludes []string + if wrapper.IgnoreFile != "" { + if excludes, _, err = parse.ContainerIgnoreFile(contextDir, wrapper.IgnoreFile); err != nil { + return define.BuildOptions{}, []string{}, err + } + } + + var timestamp *time.Time + if engine.Command.Flag("timestamp").Changed { + t := time.Unix(wrapper.Timestamp, 0).UTC() + timestamp = &t + } + + compression := define.Gzip + if wrapper.DisableCompression { + compression = define.Uncompressed + } + + options := define.BuildOptions{ + AddCapabilities: wrapper.CapAdd, + AdditionalTags: tags, + AllPlatforms: wrapper.AllPlatforms, + Annotations: wrapper.Annotation, + Architecture: systemContext.ArchitectureChoice, + //Args: args, + BlobDirectory: wrapper.BlobCache, + CNIConfigDir: wrapper.CNIConfigDir, + CNIPluginPath: wrapper.CNIPlugInPath, + CommonBuildOpts: commonOpts, + Compression: compression, + ConfigureNetwork: networkPolicy, + ContextDirectory: contextDir, + DefaultMountsFilePath: "", + Devices: wrapper.Devices, + DropCapabilities: wrapper.CapDrop, + Err: stderr, + ForceRmIntermediateCtrs: wrapper.ForceRm, + From: wrapper.From, + IDMappingOptions: idmappingOptions, + IIDFile: wrapper.Iidfile, + In: stdin, + Isolation: isolation, + IgnoreFile: wrapper.IgnoreFile, + Labels: wrapper.Label, + Layers: layers, + LogRusage: wrapper.LogRusage, + Manifest: wrapper.Manifest, + MaxPullPushRetries: maxPullPushRetries, + NamespaceOptions: namespaceOptions, + NoCache: wrapper.NoCache, + OS: systemContext.OSChoice, + Out: stdout, + Output: output, + OutputFormat: format, + PullPolicy: pullPolicy, + PullPushRetryDelay: pullPushRetryDelay, + Quiet: wrapper.Quiet, + RemoveIntermediateCtrs: wrapper.Rm, + ReportWriter: reporter, + Runtime: wrapper.Runtime, + RuntimeArgs: runtimeFlags, + RusageLogFile: wrapper.RusageLogFile, + SignBy: wrapper.SignBy, + SignaturePolicyPath: wrapper.SignaturePolicy, + Squash: wrapper.Squash, + SystemContext: systemContext, + Target: wrapper.Target, + TransientMounts: wrapper.Volumes, + Jobs: &wrapper.Jobs, + Excludes: excludes, + Timestamp: timestamp, + Platforms: platforms, + UnsetEnvs: wrapper.UnsetEnvs, + } + + if wrapper.Quiet { + options.ReportWriter = ioutil.Discard + } + + return options, kubefiles, nil +} + +func (engine *Engine) build(cxt context.Context, kubefiles []string, options define.BuildOptions) (id string, err error) { + id, ref, err := imagebuildah.BuildDockerfiles(cxt, engine.ImageStore(), options, kubefiles...) + if err == nil && options.Manifest != "" { + logrus.Debugf("manifest list id = %q, ref = %q", id, ref.String()) + } + + // propagate the error from BuildDockerfiles instead of swallowing it. + return id, err +} + +func getKubefiles(files []string) []string { + var kubefiles []string + for _, f := range files { + if f == "-" { + kubefiles = append(kubefiles, "/dev/stdin") + } else { + kubefiles = append(kubefiles, f) + } + } + return kubefiles +} + +// migrateFlags2Wrapper sets the buildah configuration based on the sealer image-engine flags. +func (engine *Engine) migrateFlags2Wrapper(opts *common.BuildOptions, wrapper *buildFlagsWrapper) error { + flags := engine.Flags() + // imageengine cache related flags: + // cache is enabled when "layers" is true & "no-cache" is false + err := flags.Set("layers", "true") + if err != nil { + return err + } + wrapper.Layers = !opts.NoCache + wrapper.NoCache = opts.NoCache + // tags.
Like -t kubernetes:v1.16 + err = flags.Set("tag", strings.Join(opts.Tags, ",")) + if err != nil { + return err + } + wrapper.Tag = opts.Tags + // The network configuration is hardcoded; check parse.NamespaceOptions for the detailed logic. + // This network is used by the stage containers (for RUN wget and the like), + // so host networking is a sensible default. + err = flags.Set("network", "host") + if err != nil { + return err + } + + // set the platform flag in buildah; + // check the details in parse.PlatformsFromOptions + err = flags.Set("platform", opts.Platform) + if err != nil { + return err + } + + wrapper.Authfile = opts.Authfile + // do not pack the Kubefile itself into the image. + wrapper.IgnoreFile = opts.Kubefile + wrapper.File = []string{opts.Kubefile} + + wrapper.Pull = opts.PullPolicy + + wrapper.Label = append(wrapper.Label, opts.Labels...) + wrapper.Annotation = append(wrapper.Annotation, opts.Annotations...) + return nil +} diff --git a/pkg/imageengine/buildah/build_rootfs.go b/pkg/imageengine/buildah/build_rootfs.go new file mode 100644 index 00000000000..55d340f6c80 --- /dev/null +++ b/pkg/imageengine/buildah/build_rootfs.go @@ -0,0 +1,50 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buildah + +import ( + "github.com/sealerio/sealer/pkg/imageengine/common" + + "os" +) + +// BuildRootfs makes an image rootfs under /var/lib/containers/storage +// and then links it to DestDir. +func (engine *Engine) BuildRootfs(opts *common.BuildRootfsOptions) (containerID string, err error) { + // TODO: clean up the environment when this fails + cid, err := engine.CreateContainer(&common.FromOptions{ + Image: opts.ImageNameOrID, + Quiet: false, + }) + if err != nil { + return "", err + } + + mounts, err := engine.Mount(&common.MountOptions{Containers: []string{cid}}) + if err != nil { + return "", err + } + + // remove the destination dir if it exists, otherwise the Symlink will fail. + if _, err = os.Stat(opts.DestDir); err == nil { + err = os.RemoveAll(opts.DestDir) + if err != nil { + return "", err + } + } + + mountPoint := mounts[0].MountPoint + return cid, os.Symlink(mountPoint, opts.DestDir) +} diff --git a/pkg/imageengine/buildah/commit.go b/pkg/imageengine/buildah/commit.go new file mode 100644 index 00000000000..2c7679cecf0 --- /dev/null +++ b/pkg/imageengine/buildah/commit.go @@ -0,0 +1,123 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package buildah + +import ( + "os" + "time" + + "github.com/containers/buildah" + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/buildah/util" + "github.com/containers/image/v5/pkg/shortnames" + storageTransport "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" + "github.com/sealerio/sealer/pkg/imageengine/common" + "github.com/sirupsen/logrus" +) + +func (engine *Engine) Commit(opts *common.CommitOptions) error { + var dest types.ImageReference + if len(opts.ContainerID) == 0 { + return errors.Errorf("container ID must be specified") + } + if len(opts.Image) == 0 { + return errors.Errorf("image name must be specified") + } + + name := opts.ContainerID + image := opts.Image + compress := define.Gzip + if opts.DisableCompression { + compress = define.Uncompressed + } + + format, err := getFormat(opts.Format) + if err != nil { + return err + } + + ctx := getContext() + store := engine.ImageStore() + builder, err := openBuilder(ctx, store, name) + if err != nil { + return errors.Wrapf(err, "error reading build container %q", name) + } + + systemContext, err := parse.SystemContextFromOptions(engine.Command) + if err != nil { + return errors.Wrapf(err, "error building system context") + } + + // If the user specified an image, we may need to massage it a bit if + // no transport is specified. + // TODO: we only support committing to a local image; we should restrict the input name accordingly + if image != "" { + if dest, err = alltransports.ParseImageName(image); err != nil { + candidates, err := shortnames.ResolveLocally(systemContext, image) + if err != nil { + return err + } + if len(candidates) == 0 { + return errors.Errorf("error parsing target image name %q", image) + } + dest2, err2 := storageTransport.Transport.ParseStoreReference(store, candidates[0].String()) + if err2 != nil { + return errors.Wrapf(err, "error parsing target image name %q", image) + } + dest = dest2 + } + } + + // Add builder identity information. + builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) + + options := buildah.CommitOptions{ + PreferredManifestType: format, + Manifest: opts.Manifest, + Compression: compress, + SystemContext: systemContext, + Squash: opts.Squash, + } + if opts.Timestamp != 0 { + timestamp := time.Unix(opts.Timestamp, 0).UTC() + options.HistoryTimestamp = &timestamp + } + + if !opts.Quiet { + options.ReportWriter = os.Stderr + } + id, ref, _, err := builder.Commit(ctx, dest, options) + if err != nil { + return util.GetFailureCause(err, errors.Wrapf(err, "error committing container %q to %q", builder.Container, image)) + } + if ref != nil && id != "" { + logrus.Debugf("wrote image %s with ID %s", ref, id) + } else if ref != nil { + logrus.Debugf("wrote image %s", ref) + } else if id != "" { + logrus.Debugf("wrote image with ID %s", id) + } else { + logrus.Debugf("wrote image") + } + + if opts.Rm { + return builder.Delete() + } + return nil +} diff --git a/pkg/imageengine/buildah/common.go b/pkg/imageengine/buildah/common.go new file mode 100644 index 00000000000..1e1ddae4e6d --- /dev/null +++ b/pkg/imageengine/buildah/common.go @@ -0,0 +1,208 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buildah + +import ( + "context" + + "github.com/containers/buildah" + + "os" + "time" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/common/pkg/umask" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sealerio/sealer/pkg/imageengine/common" +) + +// +//var ( +// // configuration, including customizations made in containers.conf +// needToShutdownStore = false +//) + +const ( + maxPullPushRetries = 3 + pullPushRetryDelay = 2 * time.Second +) + +// TODO find a package to place these flags +const ( + OCIManifestDir = "oci-dir" + OCIArchive = "oci-archive" + V2s2ManifestDir = "docker-dir" + V2s2Archive = "docker-archive" +) + +func getStore(configurations *common.EngineGlobalConfigurations) (storage.Store, error) { + options, err := storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID()) + if err != nil { + return nil, err + } + + if configurations != nil { + if len(configurations.GraphRoot) > 0 { + options.GraphRoot = configurations.GraphRoot + } + if len(configurations.RunRoot) > 0 { + options.RunRoot = configurations.RunRoot + } + } + + // TODO support more flags in the future + + // + //if err := setXDGRuntimeDir(); err != nil { + // return nil, err + //} + // + //if c.Flag("storage-driver").Changed { + // options.GraphDriverName = globalFlagResults.StorageDriver + // // If any options setup in config, these should be dropped if user overrode the driver + // options.GraphDriverOptions = []string{} + //} + //if c.Flag("storage-opt").Changed { + // if len(globalFlagResults.StorageOpts) > 0 { + // options.GraphDriverOptions = globalFlagResults.StorageOpts + // } + //} + + // Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part + // of the mount command. + // Differently, allow the mount if we are already in a userns, as the mount point will still + // be accessible once "buildah mount" exits. + if os.Geteuid() != 0 && options.GraphDriverName != "vfs" { + return nil, errors.Errorf("cannot mount using driver %s in rootless mode. 
You need to run it in a `buildah unshare` session", options.GraphDriverName) + } + + // TODO support more flags in the future + //if len(globalFlagResults.UserNSUID) > 0 { + // uopts := globalFlagResults.UserNSUID + // gopts := globalFlagResults.UserNSGID + // + // if len(gopts) == 0 { + // gopts = uopts + // } + // + // uidmap, gidmap, err := unshare.ParseIDMappings(uopts, gopts) + // if err != nil { + // return nil, err + // } + // options.UIDMap = uidmap + // options.GIDMap = gidmap + //} else { + // if len(globalFlagResults.UserNSGID) > 0 { + // return nil, errors.New("option --userns-gid-map can not be used without --userns-uid-map") + // } + //} + // + //// If a subcommand has the flags, check if they are set; if so, override the global values + //if c.Flags().Lookup("userns-uid-map").Changed { + // uopts, _ := c.Flags().GetStringSlice("userns-uid-map") + // gopts, _ := c.Flags().GetStringSlice("userns-gid-map") + // if len(gopts) == 0 { + // gopts = uopts + // } + // uidmap, gidmap, err := unshare.ParseIDMappings(uopts, gopts) + // if err != nil { + // return nil, err + // } + // options.UIDMap = uidmap + // options.GIDMap = gidmap + //} else { + // if c.Flags().Lookup("userns-gid-map").Changed { + // return nil, errors.New("option --userns-gid-map can not be used without --userns-uid-map") + // } + //} + umask.Check() + + store, err := storage.GetStore(options) + if store != nil { + is.Transport.SetStore(store) + } + //needToShutdownStore = true + return store, err +} + +func openBuilder(ctx context.Context, store storage.Store, name string) (builder *buildah.Builder, err error) { + if name != "" { + builder, err = buildah.OpenBuilder(store, name) + if os.IsNotExist(errors.Cause(err)) { + options := buildah.ImportOptions{ + Container: name, + } + builder, err = buildah.ImportBuilder(ctx, store, options) + } + } + if err != nil { + return nil, err + } + if builder == nil { + return nil, errors.Errorf("error finding build container") + } + return builder, nil +} + +func openImage(ctx context.Context, sc *types.SystemContext, store storage.Store, name string) (builder *buildah.Builder, err error) { + options := buildah.ImportFromImageOptions{ + Image: name, + SystemContext: sc, + } + builder, err = buildah.ImportBuilderFromImage(ctx, store, options) + if err != nil { + return nil, err + } + if builder == nil { + return nil, errors.Errorf("error mocking up build configuration") + } + return builder, nil +} + +// getContext returns a context.TODO +func getContext() context.Context { + return context.TODO() +} + +func getFormat(format string) (string, error) { + switch format { + case define.OCI: + return define.OCIv1ImageManifest, nil + case define.DOCKER: + return define.Dockerv2ImageManifest, nil + default: + return "", errors.Errorf("unrecognized image type %q", format) + } +} + +func defaultIsolationOption() (define.Isolation, error) { + return parse.IsolationOption("") +} + +func defaultNamespaceOptions() (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy) { + options := make(define.NamespaceOptions, 0, 7) + policy := define.NetworkEnabled + options.AddOrReplace(define.NamespaceOption{ + Name: "network", + Host: true, + }) + + return options, policy +} diff --git a/pkg/imageengine/buildah/copy.go b/pkg/imageengine/buildah/copy.go new file mode 100644 index 00000000000..f24609a2d53 --- /dev/null +++ b/pkg/imageengine/buildah/copy.go @@ -0,0 +1,95 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buildah + +import ( + "fmt" + + "github.com/containers/buildah" + + "time" + + buildahcli "github.com/containers/buildah/pkg/cli" + "github.com/containers/buildah/pkg/parse" + "github.com/pkg/errors" + "github.com/sealerio/sealer/pkg/imageengine/common" +) + +func (engine *Engine) Copy(opts *common.CopyOptions) error { + if len(opts.Container) == 0 { + return errors.Errorf("container ID must be specified") + } + if len(opts.SourcesRel2CxtDir) == 0 { + return errors.Errorf("src must be specified") + } + if len(opts.DestinationInContainer) == 0 { + return errors.Errorf("destination in container must be specified") + } + + name := opts.Container + dest := opts.DestinationInContainer + store := engine.ImageStore() + + var idMappingOptions *buildah.IDMappingOptions + contextdir := opts.ContextDir + if opts.IgnoreFile != "" && contextdir == "" { + return errors.Errorf("--ignorefile option requires that you specify a context dir using --contextdir") + } + + builder, err := openBuilder(getContext(), store, name) + if err != nil { + return errors.Wrapf(err, "error reading build container %q", name) + } + + builder.ContentDigester.Restart() + + options := buildah.AddAndCopyOptions{ + ContextDir: contextdir, + IDMappingOptions: idMappingOptions, + } + if opts.ContextDir != "" { + var excludes []string + + excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDir, opts.IgnoreFile) + if err != nil { + return err + } + options.Excludes = excludes + } + + err = builder.Add(dest, false, options, opts.SourcesRel2CxtDir...) + if err != nil { + return errors.Wrapf(err, "error adding content to container %q", builder.Container) + } + + contentType, digest := builder.ContentDigester.Digest() + if !opts.Quiet { + fmt.Printf("%s\n", digest.Hex()) + } + if contentType != "" { + contentType = contentType + ":" + } + conditionallyAddHistory(builder, opts, "/bin/sh -c #(nop) %s %s%s", "COPY", contentType, digest.Hex()) + return builder.Save() +} + +func conditionallyAddHistory(builder *buildah.Builder, opts *common.CopyOptions, createdByFmt string, args ...interface{}) { + if opts.AddHistory || buildahcli.DefaultHistory() { + now := time.Now().UTC() + created := &now + createdBy := fmt.Sprintf(createdByFmt, args...) + builder.AddPrependedEmptyLayer(created, createdBy, "", "") + } +} diff --git a/pkg/imageengine/buildah/engine.go b/pkg/imageengine/buildah/engine.go new file mode 100644 index 00000000000..56a924b9f09 --- /dev/null +++ b/pkg/imageengine/buildah/engine.go @@ -0,0 +1,61 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"github.com/containers/buildah/pkg/parse"
+	"github.com/containers/common/libimage"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+	"github.com/spf13/cobra"
+)
+
+type Engine struct {
+	*cobra.Command
+	libimageRuntime *libimage.Runtime
+	imageStore      storage.Store
+}
+
+func (engine *Engine) ImageRuntime() *libimage.Runtime {
+	return engine.libimageRuntime
+}
+
+func (engine *Engine) ImageStore() storage.Store {
+	return engine.imageStore
+}
+
+func NewBuildahImageEngine(configurations common.EngineGlobalConfigurations) (*Engine, error) {
+	if err := initBuildah(); err != nil {
+		return nil, err
+	}
+
+	store, err := getStore(&configurations)
+	if err != nil {
+		return nil, err
+	}
+
+	sysCxt := &types.SystemContext{BigFilesTemporaryDir: parse.GetTempDir()}
+	imageRuntime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: sysCxt})
+	if err != nil {
+		return nil, err
+	}
+
+	return &Engine{
+		Command:         &cobra.Command{},
+		libimageRuntime: imageRuntime,
+		imageStore:      store,
+	}, nil
+}
diff --git a/pkg/imageengine/buildah/from.go b/pkg/imageengine/buildah/from.go
new file mode 100644
index 00000000000..58391fe3939
--- /dev/null
+++ b/pkg/imageengine/buildah/from.go
@@ -0,0 +1,262 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/containers/buildah"
+	"github.com/containers/buildah/define"
+	buildahcli "github.com/containers/buildah/pkg/cli"
+	"github.com/containers/buildah/pkg/parse"
+	"github.com/containers/common/pkg/config"
+	encconfig "github.com/containers/ocicrypt/config"
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+	"github.com/sirupsen/logrus"
+)
+
+type fromFlagsWrapper struct {
+	*buildahcli.FromAndBudResults
+	*buildahcli.UserNSResults
+	*buildahcli.NameSpaceResults
+}
+
+// createContainerFromImage creates a working container. This function is copied from
+// "buildah from". It takes args ([]string{"$image"}) and creates a working container
+// based on $image; this generates an empty directory, not a real rootfs, so the
+// resulting container is a fake container.
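+// Note (comment added for clarity): the returned ID identifies the working
+// container; callers are expected to mount it themselves if they need access
+// to its (initially empty) filesystem.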
+func (engine *Engine) createContainerFromImage(opts *common.FromOptions) (string, error) {
+	defaultContainerConfig, err := config.Default()
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to get container config")
+	}
+
+	if len(opts.Image) == 0 {
+		return "", errors.Errorf("an image name (or \"scratch\") must be specified")
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(engine.Command)
+	if err != nil {
+		return "", errors.Wrapf(err, "error building system context")
+	}
+	//platforms, err := parse.PlatformsFromOptions(engine.Command)
+	//if err != nil {
+	//	return "", err
+	//}
+	//if len(platforms) > 1 {
+	//	logrus.Warnf("ignoring platforms other than %+v: %+v", platforms[0], platforms[1:])
+	//}
+
+	// TODO: we do not support pulling from a remote registry currently;
+	// maybe this should be PullNever.
+	pullPolicy := define.PullIfMissing
+
+	store := engine.ImageStore()
+
+	commonOpts, err := parse.CommonBuildOptions(engine.Command)
+	if err != nil {
+		return "", err
+	}
+
+	isolation, err := defaultIsolationOption()
+	if err != nil {
+		return "", err
+	}
+
+	//namespaceOptions, networkPolicy, err := parse.NamespaceOptions(adaptor.Command)
+	//if err != nil {
+	//	return "", errors.Wrapf(err, "error parsing namespace-related options")
+	//}
+
+	namespaceOptions, networkPolicy := defaultNamespaceOptions()
+
+	usernsOption, idmappingOptions, err := parse.IDMappingOptions(engine.Command, isolation)
+	if err != nil {
+		return "", errors.Wrapf(err, "error parsing ID mapping options")
+	}
+	namespaceOptions.AddOrReplace(usernsOption...)
+
+	// Hardcode the format here; users do not need to care about it.
+	format, err := getFormat(define.OCI)
+	if err != nil {
+		return "", err
+	}
+
+	capabilities, err := defaultContainerConfig.Capabilities("", []string{}, []string{})
+	if err != nil {
+		return "", err
+	}
+
+	commonOpts.Ulimit = append(defaultContainerConfig.Containers.DefaultUlimits, commonOpts.Ulimit...)
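+
+	// Note (comment added for clarity): most BuilderOptions fields below stay
+	// at their zero values on purpose; sealer drives buildah non-interactively
+	// and only pins the fields it actually relies on (pull policy, isolation,
+	// namespaces, format, retry behavior).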
+	options := buildah.BuilderOptions{
+		FromImage:             opts.Image,
+		Container:             "",
+		ContainerSuffix:       "",
+		PullPolicy:            pullPolicy,
+		SystemContext:         systemContext,
+		DefaultMountsFilePath: "",
+		Isolation:             isolation,
+		NamespaceOptions:      namespaceOptions,
+		ConfigureNetwork:      networkPolicy,
+		CNIPluginPath:         "",
+		CNIConfigDir:          "",
+		IDMappingOptions:      idmappingOptions,
+		Capabilities:          capabilities,
+		CommonBuildOpts:       commonOpts,
+		Format:                format,
+		DefaultEnv:            defaultContainerConfig.GetDefaultEnv(),
+		MaxPullRetries:        maxPullPushRetries,
+		PullRetryDelay:        pullPushRetryDelay,
+		OciDecryptConfig:      &encconfig.DecryptConfig{},
+	}
+
+	if !opts.Quiet {
+		options.ReportWriter = os.Stderr
+	}
+
+	builder, err := buildah.NewBuilder(getContext(), store, options)
+	if err != nil {
+		return "", err
+	}
+
+	if err := onBuild(builder, opts.Quiet); err != nil {
+		return "", err
+	}
+
+	// we don't need the cidfile (write container ID into a file)
+
+	//if adaptor.cidfile != "" {
+	//	filePath := iopts.cidfile
+	//	if err := ioutil.WriteFile(filePath, []byte(builder.ContainerID), 0644); err != nil {
+	//		return errors.Wrapf(err, "failed to write container ID file %q", filePath)
+	//	}
+	//}
+	//fmt.Printf("%s\n", builder.Container)
+	return builder.ContainerID, builder.Save()
+}
+
+func (engine *Engine) CreateContainer(opts *common.FromOptions) (string, error) {
+	wrapper := &fromFlagsWrapper{
+		FromAndBudResults: &buildahcli.FromAndBudResults{},
+		UserNSResults:     &buildahcli.UserNSResults{},
+		NameSpaceResults:  &buildahcli.NameSpaceResults{},
+	}
+
+	flags := engine.Flags()
+	fromAndBudFlags, err := buildahcli.GetFromAndBudFlags(wrapper.FromAndBudResults, wrapper.UserNSResults, wrapper.NameSpaceResults)
+	if err != nil {
+		return "", err
+	}
+
+	flags.AddFlagSet(&fromAndBudFlags)
+
+	err = engine.migrateFlags2BuildahFrom(opts)
+	if err != nil {
+		return "", err
+	}
+
+	return engine.createContainerFromImage(opts)
+}
+
+func (engine *Engine) migrateFlags2BuildahFrom(opts *common.FromOptions) error {
+	return nil
+}
+
+func onBuild(builder *buildah.Builder, quiet bool) error {
+	ctr := 0
+	for _, onBuildSpec := range builder.OnBuild() {
+		ctr++
+		commands := strings.Split(onBuildSpec, " ")
+		command := strings.ToUpper(commands[0])
+		args := commands[1:]
+		if !quiet {
+			fmt.Fprintf(os.Stderr, "STEP %d: %s\n", ctr, onBuildSpec)
+		}
+		switch command {
+		case "ADD", "COPY":
+			dest := ""
+			size := len(args)
+			if size > 1 {
+				dest = args[size-1]
+				args = args[:size-1]
+			}
+			if err := builder.Add(dest, command == "ADD", buildah.AddAndCopyOptions{}, args...); err != nil {
+				return err
+			}
+		case "ANNOTATION":
+			annotation := strings.SplitN(args[0], "=", 2)
+			if len(annotation) > 1 {
+				builder.SetAnnotation(annotation[0], annotation[1])
+			} else {
+				builder.UnsetAnnotation(annotation[0])
+			}
+		case "CMD":
+			builder.SetCmd(args)
+		case "ENV":
+			env := strings.SplitN(args[0], "=", 2)
+			if len(env) > 1 {
+				builder.SetEnv(env[0], env[1])
+			} else {
+				builder.UnsetEnv(env[0])
+			}
+		case "ENTRYPOINT":
+			builder.SetEntrypoint(args)
+		case "EXPOSE":
+			builder.SetPort(strings.Join(args, " "))
+		case "HOSTNAME":
+			builder.SetHostname(strings.Join(args, " "))
+		case "LABEL":
+			label := strings.SplitN(args[0], "=", 2)
+			if len(label) > 1 {
+				builder.SetLabel(label[0], label[1])
+			} else {
+				builder.UnsetLabel(label[0])
+			}
+		case "MAINTAINER":
+			builder.SetMaintainer(strings.Join(args, " "))
+		case "ONBUILD":
+			builder.SetOnBuild(strings.Join(args, " "))
+		case "RUN":
+			var stdout io.Writer
+			if quiet {
+				stdout = ioutil.Discard
+			}
+			if err := builder.Run(args, buildah.RunOptions{Stdout: stdout}); err != nil {
+				return err
+			}
+		case "SHELL":
+			builder.SetShell(args)
+		case "STOPSIGNAL":
+			builder.SetStopSignal(strings.Join(args, " "))
+		case "USER":
+			builder.SetUser(strings.Join(args, " "))
+		case "VOLUME":
+			builder.AddVolume(strings.Join(args, " "))
+		case "WORKINGDIR":
+			builder.SetWorkDir(strings.Join(args, " "))
+		default:
+			logrus.Errorf("unknown OnBuild command %q; ignored", onBuildSpec)
+		}
+	}
+	builder.ClearOnBuild()
+	return nil
+}
diff --git a/pkg/imageengine/buildah/get_annotation.go b/pkg/imageengine/buildah/get_annotation.go
new file mode 100644
index 00000000000..dbd39b39066
--- /dev/null
+++ b/pkg/imageengine/buildah/get_annotation.go
@@ -0,0 +1,48 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"errors"
+
+	"github.com/containers/buildah"
+	"github.com/containers/buildah/pkg/parse"
+
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+func (engine *Engine) GetImageAnnotation(opts *common.GetImageAnnoOptions) (map[string]string, error) {
+	if len(opts.ImageNameOrID) == 0 {
+		return nil, errors.New("image name or image ID must be specified")
+	}
+
+	var builder *buildah.Builder
+	systemContext, err := parse.SystemContextFromOptions(engine.Command)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx := getContext()
+	store := engine.ImageStore()
+	name := opts.ImageNameOrID
+
+	builder, err = openImage(ctx, systemContext, store, name)
+	if err != nil {
+		return nil, err
+	}
+
+	out := buildah.GetBuildInfo(builder)
+	return out.ImageAnnotations, nil
+}
diff --git a/pkg/imageengine/buildah/images.go b/pkg/imageengine/buildah/images.go
new file mode 100644
index 00000000000..c145f66d02e
--- /dev/null
+++ b/pkg/imageengine/buildah/images.go
@@ -0,0 +1,300 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/containers/buildah/pkg/formats"
+	"github.com/containers/buildah/pkg/parse"
+	"github.com/containers/common/libimage"
+	"github.com/docker/go-units"
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+const none = ""
+
+type jsonImage struct {
+	ID           string    `json:"id"`
+	Names        []string  `json:"names"`
+	Digest       string    `json:"digest"`
+	CreatedAt    string    `json:"createdat"`
+	Size         string    `json:"size"`
+	Created      int64     `json:"created"`
+	CreatedAtRaw time.Time `json:"createdatraw"`
+	ReadOnly     bool      `json:"readonly"`
+	History      []string  `json:"history"`
+}
+
+type imageOutputParams struct {
+	Tag          string
+	ID           string
+	Name         string
+	Digest       string
+	Created      int64
+	CreatedAt    string
+	Size         string
+	CreatedAtRaw time.Time
+	ReadOnly     bool
+	History      string
+}
+
+type imageOptions struct {
+	all       bool
+	digests   bool
+	format    string
+	json      bool
+	noHeading bool
+	truncate  bool
+	quiet     bool
+	readOnly  bool
+	history   bool
+}
+
+var imagesHeader = map[string]string{
+	"Name":      "REPOSITORY",
+	"Tag":       "TAG",
+	"ID":        "IMAGE ID",
+	"CreatedAt": "CREATED",
+	"Size":      "SIZE",
+	"ReadOnly":  "R/O",
+	"History":   "HISTORY",
+}
+
+func (engine *Engine) Images(opts *common.ImagesOptions) error {
+	store := engine.ImageStore()
+	systemContext, err := parse.SystemContextFromOptions(engine.Command)
+	if err != nil {
+		return errors.Wrapf(err, "error building system context")
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	ctx := context.Background()
+
+	options := &libimage.ListImagesOptions{}
+	if !opts.All {
+		options.Filters = append(options.Filters, "intermediate=false")
+		options.Filters = append(options.Filters, "label=io.sealer.version")
+	}
+
+	// TODO: add some label to identify sealer image and oci image.
+	images, err := runtime.ListImages(ctx, []string{}, options)
+	if err != nil {
+		return err
+	}
+
+	imageOpts := imageOptions{
+		all:       opts.All,
+		digests:   opts.Digests,
+		json:      opts.JSON,
+		noHeading: opts.NoHeading,
+		truncate:  !opts.NoTrunc,
+		quiet:     opts.Quiet,
+		history:   opts.History,
+	}
+
+	if opts.JSON {
+		return formatImagesJSON(images, imageOpts)
+	}
+
+	return formatImages(images, imageOpts)
+}
+
+func outputHeader(opts imageOptions) string {
+	if opts.format != "" {
+		return strings.Replace(opts.format, `\t`, "\t", -1)
+	}
+	if opts.quiet {
+		return formats.IDString
+	}
+	format := "table {{.Name}}\t{{.Tag}}\t"
+	if opts.noHeading {
+		format = "{{.Name}}\t{{.Tag}}\t"
+	}
+
+	if opts.digests {
+		format += "{{.Digest}}\t"
+	}
+	format += "{{.ID}}\t{{.CreatedAt}}\t{{.Size}}"
+	if opts.readOnly {
+		format += "\t{{.ReadOnly}}"
+	}
+	if opts.history {
+		format += "\t{{.History}}"
+	}
+	return format
+}
+
+func formatImagesJSON(images []*libimage.Image, opts imageOptions) error {
+	jsonImages := []jsonImage{}
+	for _, image := range images {
+		// Copy the base data over to the output param.
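+		// Note (comment added for clarity): image.Size() may fail, e.g. for
+		// partially removed images; the error is propagated rather than
+		// reporting a zero size.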
+		size, err := image.Size()
+		if err != nil {
+			return err
+		}
+		created := image.Created()
+		jsonImages = append(jsonImages,
+			jsonImage{
+				CreatedAtRaw: created,
+				Created:      created.Unix(),
+				CreatedAt:    units.HumanDuration(time.Since(created)) + " ago",
+				Digest:       image.Digest().String(),
+				ID:           truncateID(image.ID(), opts.truncate),
+				Names:        image.Names(),
+				ReadOnly:     image.IsReadOnly(),
+				Size:         formattedSize(size),
+			})
+	}
+
+	data, err := json.MarshalIndent(jsonImages, "", " ")
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s\n", data)
+	return nil
+}
+
+type imagesSorted []imageOutputParams
+
+func (a imagesSorted) Less(i, j int) bool {
+	return a[i].CreatedAtRaw.After(a[j].CreatedAtRaw)
+}
+
+func (a imagesSorted) Len() int {
+	return len(a)
+}
+
+func (a imagesSorted) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func formatImages(images []*libimage.Image, opts imageOptions) error {
+	var outputData imagesSorted
+
+	for _, image := range images {
+		var outputParam imageOutputParams
+		size, err := image.Size()
+		if err != nil {
+			return err
+		}
+		created := image.Created()
+		outputParam.CreatedAtRaw = created
+		outputParam.Created = created.Unix()
+		outputParam.CreatedAt = units.HumanDuration(time.Since(created)) + " ago"
+		outputParam.Digest = image.Digest().String()
+		outputParam.ID = truncateID(image.ID(), opts.truncate)
+		outputParam.Size = formattedSize(size)
+		outputParam.ReadOnly = image.IsReadOnly()
+
+		repoTags, err := image.NamedRepoTags()
+		if err != nil {
+			return err
+		}
+
+		nameTagPairs, err := libimage.ToNameTagPairs(repoTags)
+		if err != nil {
+			return err
+		}
+
+		for _, pair := range nameTagPairs {
+			newParam := outputParam
+			newParam.Name = pair.Name
+			newParam.Tag = pair.Tag
+			newParam.History = formatHistory(image.NamesHistory(), pair.Name, pair.Tag)
+			outputData = append(outputData, newParam)
+			// `images -q` should print a given ID only once.
+			if opts.quiet {
+				break
+			}
+		}
+	}
+
+	sort.Sort(outputData)
+	out := formats.StdoutTemplateArray{Output: imagesToGeneric(outputData), Template: outputHeader(opts), Fields: imagesHeader}
+	return formats.Writer(out).Out()
+}
+
+func formatHistory(history []string, name, tag string) string {
+	if len(history) == 0 {
+		return none
+	}
+	// Skip the first history entry if already existing as name
+	if fmt.Sprintf("%s:%s", name, tag) == history[0] {
+		if len(history) == 1 {
+			return none
+		}
+		return strings.Join(history[1:], ", ")
+	}
+	return strings.Join(history, ", ")
+}
+
+func truncateID(id string, truncate bool) string {
+	if !truncate {
+		return "sha256:" + id
+	}
+
+	if idTruncLength := 12; len(id) > idTruncLength {
+		return id[:idTruncLength]
+	}
+	return id
+}
+
+func imagesToGeneric(templParams []imageOutputParams) (genericParams []interface{}) {
+	if len(templParams) > 0 {
+		for _, v := range templParams {
+			genericParams = append(genericParams, interface{}(v))
+		}
+	}
+	return genericParams
+}
+
+func formattedSize(size int64) string {
+	suffixes := [5]string{"B", "KB", "MB", "GB", "TB"}
+
+	count := 0
+	formattedSize := float64(size)
+	for formattedSize >= 1000 && count < 4 {
+		formattedSize /= 1000
+		count++
+	}
+	return fmt.Sprintf("%.3g %s", formattedSize, suffixes[count])
+}
+
+//func matchesID(imageID, argID string) bool {
+//	return strings.HasPrefix(imageID, argID)
+//}
+//
+//func matchesReference(name, argName string) bool {
+//	if argName == "" {
+//		return true
+//	}
+//	splitName := strings.Split(name, ":")
+//	// If the arg contains a tag, we handle it differently than if it does not
+//	if strings.Contains(argName, ":") {
+//		splitArg := strings.Split(argName, ":")
+//		return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])
+//	}
+//	return strings.HasSuffix(splitName[0], argName)
+//}
diff --git a/pkg/imageengine/buildah/init.go b/pkg/imageengine/buildah/init.go
new file mode 100644
index 00000000000..ac8c045aa76
--- /dev/null
+++ b/pkg/imageengine/buildah/init.go
@@ -0,0 +1,79 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"os"
+	"path/filepath"
+)
+
+const (
+	buildahEtcRegistriesConf = `
+[registries.search]
+registries = ['docker.io']
+
+# Registries that do not use TLS when pulling images or uses self-signed
+# certificates.
+[registries.insecure]
+registries = []
+
+[registries.block]
+registries = []
+`
+
+	buildahEtcPolicy = `
+{
+    "default": [
+        {
+            "type": "insecureAcceptAnything"
+        }
+    ],
+    "transports":
+        {
+            "docker-daemon":
+                {
+                    "": [{"type":"insecureAcceptAnything"}]
+                }
+        }
+}`
+)
+
+// TODO: do we have a util or unified local storage accessing pattern?
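+//
+// writeFileIfNotExist (doc comment added for clarity) writes content to path
+// only when the file does not already exist, creating parent directories as
+// needed, so any user-managed configuration is never overwritten.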
+func writeFileIfNotExist(path string, content []byte) error {
+	_, err := os.Stat(path)
+	if err != nil {
+		err = os.MkdirAll(filepath.Dir(path), 0750)
+		if err != nil {
+			return err
+		}
+
+		err = os.WriteFile(path, content, 0600)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func initBuildah() error {
+	policyAbsPath := "/etc/containers/policy.json"
+	err := writeFileIfNotExist(policyAbsPath, []byte(buildahEtcPolicy))
+	if err != nil {
+		return err
+	}
+
+	registriesAbsPath := "/etc/containers/registries.conf"
+	return writeFileIfNotExist(registriesAbsPath, []byte(buildahEtcRegistriesConf))
+}
diff --git a/pkg/imageengine/buildah/inspect.go b/pkg/imageengine/buildah/inspect.go
new file mode 100644
index 00000000000..cc743f94ab0
--- /dev/null
+++ b/pkg/imageengine/buildah/inspect.go
@@ -0,0 +1,91 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"regexp"
+	"text/template"
+
+	"github.com/containers/buildah"
+	"github.com/containers/buildah/pkg/parse"
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+	"golang.org/x/term"
+)
+
+const (
+	//inspectTypeContainer = "container"
+	inspectTypeImage = "image"
+	//inspectTypeManifest = "manifest"
+)
+
+func (engine *Engine) Inspect(opts *common.InspectOptions) error {
+	if len(opts.ImageNameOrID) == 0 {
+		return errors.Errorf("image name or image ID must be specified")
+	}
+	var builder *buildah.Builder
+
+	systemContext, err := parse.SystemContextFromOptions(engine.Command)
+	if err != nil {
+		return err
+	}
+
+	ctx := getContext()
+	store := engine.ImageStore()
+	name := opts.ImageNameOrID
+
+	switch opts.InspectType {
+	case inspectTypeImage:
+		builder, err = openImage(ctx, systemContext, store, name)
+		if err != nil {
+			return err
+		}
+	//case inspectTypeManifest:
+	default:
+		return errors.Errorf("the only recognized type is %q", inspectTypeImage)
+	}
+
+	out := buildah.GetBuildInfo(builder)
+	if opts.Format != "" {
+		format := opts.Format
+		if matched, err := regexp.MatchString("{{.*}}", format); err != nil {
+			return errors.Wrapf(err, "error validating format provided: %s", format)
+		} else if !matched {
+			return errors.Errorf("invalid format provided: %s", format)
+		}
+		t, err := template.New("format").Parse(format)
+		if err != nil {
+			return errors.Wrapf(err, "template parsing error")
+		}
+		if err = t.Execute(os.Stdout, out); err != nil {
+			return err
+		}
+		if term.IsTerminal(int(os.Stdout.Fd())) {
+			fmt.Println()
+		}
+		return nil
+	}
+
+	enc := json.NewEncoder(os.Stdout)
+	enc.SetIndent("", " ")
+	if term.IsTerminal(int(os.Stdout.Fd())) {
+		enc.SetEscapeHTML(false)
+	}
+	return enc.Encode(out)
+}
diff --git a/pkg/imageengine/buildah/load.go b/pkg/imageengine/buildah/load.go
new file mode 100644
index 00000000000..013b658de36
--- /dev/null
+++ b/pkg/imageengine/buildah/load.go
@@ -0,0 +1,59 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buildah + +import ( + "context" + "fmt" + + "os" + + "github.com/containers/common/libimage" + "github.com/sealerio/sealer/pkg/imageengine/common" + + "strings" +) + +func (engine *Engine) Load(opts *common.LoadOptions) error { + // Download the input file if needed. + //if strings.HasPrefix(opts.Input, "https://") || strings.HasPrefix(opts.Input, "http://") { + // tmpdir, err := util.DefaultContainerConfig().ImageCopyTmpDir() + // if err != nil { + // return err + // } + // tmpfile, err := download.FromURL(tmpdir, loadOpts.Input) + // if err != nil { + // return err + // } + // defer os.Remove(tmpfile) + // loadOpts.Input = tmpfile + //} + + if _, err := os.Stat(opts.Input); err != nil { + return err + } + + loadOpts := &libimage.LoadOptions{} + if !opts.Quiet { + loadOpts.Writer = os.Stderr + } + + loadedImages, err := engine.ImageRuntime().Load(context.Background(), opts.Input, loadOpts) + if err != nil { + return err + } + fmt.Println("Loaded image: " + strings.Join(loadedImages, "\nLoaded image: ")) + return nil +} diff --git a/pkg/imageengine/buildah/login.go b/pkg/imageengine/buildah/login.go new file mode 100644 index 00000000000..03adf0e00de --- /dev/null +++ b/pkg/imageengine/buildah/login.go @@ -0,0 +1,52 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package buildah + +import ( + "context" + + "github.com/containers/buildah/pkg/parse" + "github.com/containers/common/pkg/auth" + + "os" + + "github.com/pkg/errors" + "github.com/sealerio/sealer/pkg/imageengine/common" +) + +func (engine *Engine) Login(opts *common.LoginOptions) error { + if len(opts.Domain) == 0 { + return errors.Errorf("please specify a registry to login to") + } + + systemContext, err := parse.SystemContextFromOptions(engine.Command) + if err != nil { + return errors.Wrapf(err, "error building system context") + } + + systemContext.AuthFilePath = opts.AuthFile + + return auth.Login(context.TODO(), + systemContext, + &auth.LoginOptions{ + AuthFile: opts.AuthFile, + CertDir: opts.CertDir, + Password: opts.Password, + Username: opts.Username, + Stdout: os.Stdout, + AcceptRepositories: true, + }, + []string{opts.Domain}) +} diff --git a/pkg/imageengine/buildah/logout.go b/pkg/imageengine/buildah/logout.go new file mode 100644 index 00000000000..cbe5e15fe51 --- /dev/null +++ b/pkg/imageengine/buildah/logout.go @@ -0,0 +1,43 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buildah + +import ( + "github.com/containers/buildah/pkg/parse" + "github.com/containers/common/pkg/auth" + + "os" + + "github.com/pkg/errors" + pkgauth "github.com/sealerio/sealer/pkg/auth" + "github.com/sealerio/sealer/pkg/imageengine/common" +) + +func (engine *Engine) Logout(opts *common.LogoutOptions) error { + if len(opts.Domain) == 0 { + return errors.Errorf("registry must be given") + } + + systemContext, err := parse.SystemContextFromOptions(engine.Command) + if err != nil { + return errors.Wrapf(err, "error building system context") + } + return auth.Logout(systemContext, &auth.LogoutOptions{ + AuthFile: pkgauth.GetDefaultAuthFilePath(), + All: opts.All, + AcceptRepositories: true, + Stdout: os.Stdout, + }, []string{opts.Domain}) +} diff --git a/pkg/imageengine/buildah/manifest.go b/pkg/imageengine/buildah/manifest.go new file mode 100644 index 00000000000..a00922e324c --- /dev/null +++ b/pkg/imageengine/buildah/manifest.go @@ -0,0 +1,87 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package buildah
+
+import (
+	"os"
+
+	"github.com/containers/common/libimage"
+	"github.com/containers/common/libimage/manifests"
+	cp "github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+func manifestPush(systemContext *types.SystemContext, store storage.Store, listImageSpec, destSpec string, opts common.PushOptions) error {
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	manifestList, err := runtime.LookupManifestList(listImageSpec)
+	if err != nil {
+		return err
+	}
+
+	_, list, err := manifests.LoadFromImage(store, manifestList.ID())
+	if err != nil {
+		return err
+	}
+
+	dest, err := alltransports.ParseImageName(destSpec)
+	if err != nil {
+		return err
+	}
+
+	var manifestType string
+	if opts.Format != "" {
+		switch opts.Format {
+		case "oci":
+			manifestType = imgspecv1.MediaTypeImageManifest
+		case "v2s2", "docker":
+			manifestType = manifest.DockerV2Schema2MediaType
+		default:
+			return errors.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format)
+		}
+	}
+
+	options := manifests.PushOptions{
+		Store:              store,
+		SystemContext:      systemContext,
+		ImageListSelection: cp.CopySpecificImages,
+		Instances:          nil,
+		ManifestType:       manifestType,
+	}
+	if opts.All {
+		options.ImageListSelection = cp.CopyAllImages
+	}
+	if !opts.Quiet {
+		options.ReportWriter = os.Stderr
+	}
+
+	_, _, err = list.Push(getContext(), dest, options)
+
+	if err == nil && opts.Rm {
+		_, err = store.DeleteImage(manifestList.ID(), true)
+	}
+
+	return err
+}
diff --git a/pkg/imageengine/buildah/mount.go b/pkg/imageengine/buildah/mount.go
new file mode 100644
index 00000000000..7d1c316b003
--- /dev/null
+++ b/pkg/imageengine/buildah/mount.go
@@ -0,0 +1,86 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+func (engine *Engine) Mount(opts *common.MountOptions) ([]common.JSONMount, error) {
+	containers := opts.Containers
+	if len(containers) == 0 {
+		return []common.JSONMount{}, errors.Errorf("id/name of containers must be specified")
+	}
+
+	store := engine.ImageStore()
+	var jsonMounts []common.JSONMount
+	var lastError error
+	// Do not allow mounting a graph driver other than vfs if we are creating the userns as part
+	// of the mount command.
+	// In contrast, allow the mount if we are already in a userns, as the mount point will still
+	// be accessible once "buildah mount" exits.
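+	// Note (comment added for clarity): with a non-vfs graph driver such as
+	// overlay, the mount itself needs privileges, hence the rootless check below.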
+	if os.Geteuid() != 0 && store.GraphDriverName() != "vfs" {
+		return []common.JSONMount{}, errors.Errorf("cannot mount using driver %s in rootless mode. You need to run it in a `buildah unshare` session", store.GraphDriverName())
+	}
+
+	for _, name := range containers {
+		builder, err := openBuilder(getContext(), store, name)
+		if err != nil {
+			if lastError != nil {
+				fmt.Fprintln(os.Stderr, lastError)
+			}
+			lastError = errors.Wrapf(err, "error reading build container %q", name)
+			continue
+		}
+		mountPoint, err := builder.Mount(builder.MountLabel)
+		if err != nil {
+			if lastError != nil {
+				fmt.Fprintln(os.Stderr, lastError)
+			}
+			lastError = errors.Wrapf(err, "error mounting %q container %q", name, builder.Container)
+			continue
+		}
+		if len(containers) > 1 {
+			jsonMounts = append(jsonMounts, common.JSONMount{Container: name, MountPoint: mountPoint})
+			continue
+		}
+		jsonMounts = append(jsonMounts, common.JSONMount{MountPoint: mountPoint})
+	}
+	//else {
+	//	builders, err := openBuilders(store)
+	//	if err != nil {
+	//		return []common.JsonMount{}, errors.Wrapf(err, "error reading build containers")
+	//	}
+	//
+	//	for _, builder := range builders {
+	//		mounted, err := builder.Mounted()
+	//		if err != nil {
+	//			return []common.JsonMount{}, err
+	//		}
+	//		if mounted {
+	//			jsonMounts = append(jsonMounts, common.JsonMount{Container: builder.Container, MountPoint: builder.MountPoint})
+	//			continue
+	//		}
+	//	}
+	//}
+
+	return jsonMounts, lastError
+}
diff --git a/pkg/imageengine/buildah/pull.go b/pkg/imageengine/buildah/pull.go
new file mode 100644
index 00000000000..bd6670d5cef
--- /dev/null
+++ b/pkg/imageengine/buildah/pull.go
@@ -0,0 +1,92 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package buildah + +import ( + "fmt" + "os" + + "github.com/containers/buildah" + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/common/pkg/auth" + "github.com/pkg/errors" + "github.com/sealerio/sealer/pkg/imageengine/common" +) + +func (engine *Engine) Pull(opts *common.PullOptions) error { + if len(opts.Image) == 0 { + return errors.Errorf("an image name must be specified") + } + + if err := auth.CheckAuthFile(opts.Authfile); err != nil { + return err + } + + if err := engine.migratePullOptionsFlags2Command(opts); err != nil { + return err + } + + systemContext, err := parse.SystemContextFromOptions(engine.Command) + if err != nil { + return errors.Wrapf(err, "error building system context") + } + systemContext.AuthFilePath = opts.Authfile + + store := engine.ImageStore() + + policy, ok := define.PolicyMap[opts.PullPolicy] + if !ok { + return fmt.Errorf("unsupported pull policy %q", opts.PullPolicy) + } + options := buildah.PullOptions{ + Store: store, + SystemContext: systemContext, + // consider export this option later + AllTags: false, + ReportWriter: os.Stderr, + MaxRetries: maxPullPushRetries, + RetryDelay: pullPushRetryDelay, + PullPolicy: policy, + } + + if opts.Quiet { + options.ReportWriter = nil // Turns off logging output + } + + id, err := buildah.Pull(getContext(), opts.Image, options) + if err != nil { + return err + } + fmt.Printf("%s\n", id) + return nil +} + +func (engine *Engine) migratePullOptionsFlags2Command(opts *common.PullOptions) error { + var ( + flags = engine.Command.Flags() + err error + ) + + if len(opts.Platform) > 0 { + flags.StringSlice("platform", []string{}, "") + // set pull platform, check "parse.SystemContextFromOptions(engine.Command)" + err = flags.Set("platform", opts.Platform) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/imageengine/buildah/push.go b/pkg/imageengine/buildah/push.go new file mode 100644 index 00000000000..b25328f811b --- /dev/null +++ b/pkg/imageengine/buildah/push.go @@ -0,0 +1,124 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package buildah
+
+import (
+	"os"
+	"strings"
+
+	"github.com/containers/buildah"
+	"github.com/containers/buildah/define"
+	"github.com/containers/buildah/pkg/parse"
+	"github.com/containers/buildah/util"
+	"github.com/containers/common/pkg/auth"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/storage"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+	"github.com/sirupsen/logrus"
+)
+
+func (engine *Engine) Push(opts *common.PushOptions) error {
+	if len(opts.Image) == 0 {
+		return errors.New("at least a source image ID must be specified")
+	}
+	if err := auth.CheckAuthFile(opts.Authfile); err != nil {
+		return err
+	}
+
+	src, destSpec := opts.Image, opts.Image
+	compress := define.Gzip
+
+	store := engine.ImageStore()
+
+	dest, err := alltransports.ParseImageName(destSpec)
+	// add the docker:// transport to see if they neglected it.
+	if err != nil {
+		destTransport := strings.Split(destSpec, ":")[0]
+		if t := transports.Get(destTransport); t != nil {
+			return err
+		}
+
+		if strings.Contains(destSpec, "://") {
+			return err
+		}
+
+		destSpec = "docker://" + destSpec
+		dest2, err2 := alltransports.ParseImageName(destSpec)
+		if err2 != nil {
+			return err
+		}
+		dest = dest2
+		logrus.Debugf("Assuming docker:// as the transport method for DESTINATION: %s", destSpec)
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(engine.Command)
+	if err != nil {
+		return errors.Wrapf(err, "error building system context")
+	}
+	// buildah's PushOptions does not support passing an authfile;
+	// it uses the authfile from the system context.
+	systemContext.AuthFilePath = opts.Authfile
+
+	var manifestType string
+	if opts.Format != "" {
+		switch opts.Format {
+		case "oci":
+			manifestType = imgspecv1.MediaTypeImageManifest
+		case "v2s1":
+			manifestType = manifest.DockerV2Schema1SignedMediaType
+		case "v2s2", "docker":
+			manifestType = manifest.DockerV2Schema2MediaType
+		default:
+			return errors.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.Format)
+		}
+	}
+
+	options := buildah.PushOptions{
+		Compression:   compress,
+		ManifestType:  manifestType,
+		Store:         store,
+		SystemContext: systemContext,
+		MaxRetries:    maxPullPushRetries,
+		RetryDelay:    pullPushRetryDelay,
+	}
+	if !opts.Quiet {
+		options.ReportWriter = os.Stderr
+	}
+
+	ref, digest, err := buildah.Push(getContext(), src, dest, options)
+	if err != nil {
+		if errors.Cause(err) != storage.ErrImageUnknown {
+			// Image might be a manifest list, so attempt a manifest push
+			if manifestsErr := manifestPush(systemContext, store, src, destSpec, *opts); manifestsErr == nil {
+				return nil
+			}
+		}
+		return util.GetFailureCause(err, errors.Wrapf(err, "error pushing image %q to %q", src, destSpec))
+	}
+	if ref != nil {
+		logrus.Debugf("pushed image %q with digest %s", ref, digest.String())
+	} else {
+		logrus.Debugf("pushed image with digest %s", digest.String())
+	}
+
+	logrus.Infof("Successfully pushed %s with digest %s", transports.ImageName(dest), digest.String())
+
+	return nil
+}
diff --git a/pkg/imageengine/buildah/rmi.go b/pkg/imageengine/buildah/rmi.go
new file mode 100644
index 00000000000..fe3865d8fc2
--- /dev/null
+++ b/pkg/imageengine/buildah/rmi.go
@@ -0,0 +1,68 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/common/libimage"
+	"github.com/hashicorp/go-multierror"
+	"github.com/pkg/errors"
+
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+func (engine *Engine) RemoveImage(opts *common.RemoveImageOptions) error {
+	if len(opts.ImageNamesOrIDs) == 0 && !opts.All && !opts.Prune {
+		return errors.Errorf("image name or ID must be specified")
+	}
+	if len(opts.ImageNamesOrIDs) > 0 && opts.All {
+		return errors.Errorf("when using the --all switch, you may not pass any image names or IDs")
+	}
+	if opts.All && opts.Prune {
+		return errors.Errorf("when using the --all switch, you may not use the --prune switch")
+	}
+	if len(opts.ImageNamesOrIDs) > 0 && opts.Prune {
+		return errors.Errorf("when using the --prune switch, you may not pass any image names or IDs")
+	}
+
+	options := &libimage.RemoveImagesOptions{
+		Filters: []string{"readonly=false"},
+	}
+
+	if opts.Prune {
+		options.Filters = append(options.Filters, "dangling=true")
+	} else if !opts.All {
+		options.Filters = append(options.Filters, "intermediate=false")
+	}
+	options.Force = opts.Force
+
+	rmiReports, rmiErrors := engine.ImageRuntime().RemoveImages(context.Background(), opts.ImageNamesOrIDs, options)
+	for _, r := range rmiReports {
+		for _, u := range r.Untagged {
+			fmt.Printf("untagged: %s\n", u)
+		}
+	}
+	for _, r := range rmiReports {
+		if r.Removed {
+			fmt.Printf("%s\n", r.ID)
+		}
+	}
+
+	var multiE *multierror.Error
+	multiE = multierror.Append(multiE, rmiErrors...)
+	return multiE.ErrorOrNil()
+}
diff --git a/pkg/imageengine/buildah/save.go b/pkg/imageengine/buildah/save.go
new file mode 100644
index 00000000000..0e64c3df4df
--- /dev/null
+++ b/pkg/imageengine/buildah/save.go
@@ -0,0 +1,48 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"context"
+
+	"github.com/containers/common/libimage"
+	"github.com/pkg/errors"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+func (engine *Engine) Save(opts *common.SaveOptions) error {
+	if len(opts.ImageNameOrID) == 0 {
+		return errors.New("image name or ID must be specified")
+	}
+	if opts.Compress && (opts.Format != OCIManifestDir && opts.Format != V2s2ManifestDir) {
+		return errors.New("--compress can only be set when --format is either 'oci-dir' or 'docker-dir'")
+	}
+
+	saveOptions := &libimage.SaveOptions{
+		CopyOptions: libimage.CopyOptions{
+			DirForceCompress:            opts.Compress,
+			OciAcceptUncompressedLayers: false,
+			// Force signature removal to preserve backwards compat.
+			// See https://github.com/containers/podman/pull/11669#issuecomment-925250264
+			RemoveSignatures: true,
+		},
+	}
+
+	// TODO: we can support saving a multi-image archive in the future;
+	// check podman save.
+	names := []string{opts.ImageNameOrID}
+
+	return engine.ImageRuntime().Save(context.Background(), names, opts.Format, opts.Output, saveOptions)
+}
diff --git a/pkg/imageengine/buildah/sealer_image_extension.go b/pkg/imageengine/buildah/sealer_image_extension.go
new file mode 100644
index 00000000000..c8fb7a654aa
--- /dev/null
+++ b/pkg/imageengine/buildah/sealer_image_extension.go
@@ -0,0 +1,42 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"fmt"
+
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+	v1 "github.com/sealerio/sealer/types/api/v1"
+	"k8s.io/apimachinery/pkg/util/json"
+)
+
+func (engine *Engine) GetSealerImageExtension(opts *common.GetImageAnnoOptions) (v1.ImageExtension, error) {
+	annotation, err := engine.GetImageAnnotation(opts)
+	extension := v1.ImageExtension{}
+	if err != nil {
+		return extension, err
+	}
+
+	extensionStr := annotation[v1.SealerImageExtension]
+	if len(extensionStr) == 0 {
+		return extension, fmt.Errorf("%s does not exist in image %s", v1.SealerImageExtension, opts.ImageNameOrID)
+	}
+
+	err = json.Unmarshal([]byte(extensionStr), &extension)
+	if err != nil {
+		return extension, fmt.Errorf("failed to unmarshal %v for image %v: %v", v1.SealerImageExtension, opts.ImageNameOrID, err)
+	}
+	return extension, nil
+}
diff --git a/pkg/imageengine/buildah/util.go b/pkg/imageengine/buildah/util.go
new file mode 100644
index 00000000000..9a378e25d4a
--- /dev/null
+++ b/pkg/imageengine/buildah/util.go
@@ -0,0 +1,56 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buildah
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// DiscoverKubefile tries to find a Kubefile within the provided `path`.
+func DiscoverKubefile(path string) (foundFile string, err error) {
+	// Test for existence of the file
+	target, err := os.Stat(path)
+	if err != nil {
+		return "", errors.Wrap(err, "discovering Kubefile")
+	}
+
+	switch mode := target.Mode(); {
+	case mode.IsDir():
+		// If the path is a real directory, we assume a Kubefile within it
+		kubefile := filepath.Join(path, "Kubefile")
+
+		// Test for existence of the Kubefile file
+		file, err := os.Stat(kubefile)
+		if err != nil {
+			return "", errors.Wrap(err, "cannot find Kubefile in context directory")
+		}
+
+		// The file exists, now verify the correct mode
+		if mode := file.Mode(); mode.IsRegular() {
+			foundFile = kubefile
+		} else {
+			return "", errors.Errorf("assumed Kubefile %q is not a file", kubefile)
+		}
+
+	case mode.IsRegular():
+		// If the context dir is a file, we assume it is the Kubefile itself
+		foundFile = path
+	}
+
+	return foundFile, nil
+}
diff --git a/pkg/imageengine/common.go b/pkg/imageengine/common.go
new file mode 100644
index 00000000000..52dd76a503c
--- /dev/null
+++ b/pkg/imageengine/common.go
@@ -0,0 +1,24 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package imageengine
+
+import (
+	"github.com/sealerio/sealer/pkg/imageengine/buildah"
+	"github.com/sealerio/sealer/pkg/imageengine/common"
+)
+
+func NewImageEngine(configurations common.EngineGlobalConfigurations) (Interface, error) {
+	return buildah.NewBuildahImageEngine(configurations)
+}
diff --git a/pkg/imageengine/common/options.go b/pkg/imageengine/common/options.go
new file mode 100644
index 00000000000..494c84ffd81
--- /dev/null
+++ b/pkg/imageengine/common/options.go
@@ -0,0 +1,157 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// BuildOptions should be kept out of buildah's scope.
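+// It is the engine-agnostic flag bag that sealer's CLI fills in; the buildah
+// engine translates these fields into buildah's own build options (doc comment
+// added for clarity).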
+type BuildOptions struct { + BuildType string + Kubefile string + Tags []string + NoCache bool + Base bool + BuildArgs []string + Platform string + ContextDir string + Authfile string + PullPolicy string + Labels []string + Annotations []string +} + +type FromOptions struct { + Image string + Quiet bool +} + +type MountOptions struct { + //Json bool + Containers []string +} + +type JSONMount struct { + Container string `json:"container,omitempty"` + MountPoint string `json:"mountPoint"` +} + +type CopyOptions struct { + AddHistory bool + Quiet bool + IgnoreFile string + ContextDir string + Container string + SourcesRel2CxtDir []string + DestinationInContainer string +} + +type CommitOptions struct { + Format string + Manifest string + Timestamp int64 + Quiet bool + Rm bool + Squash bool + DisableCompression bool + ContainerID string + Image string +} + +type LoginOptions struct { + Domain string + Username string + Password string + TLSVerify bool + CertDir string + AuthFile string +} + +type LogoutOptions struct { + Authfile string + All bool + Domain string +} + +type PushOptions struct { + Authfile string + CertDir string + Format string + Rm bool + Quiet bool + TLSVerify bool + Image string + All bool +} + +type PullOptions struct { + Authfile string + CertDir string + Quiet bool + TLSVerify bool + PullPolicy string + Image string + Platform string +} + +type ImagesOptions struct { + All bool + Digests bool + NoHeading bool + NoTrunc bool + Quiet bool + History bool + JSON bool +} + +type SaveOptions struct { + Compress bool + Format string + // don't support currently + MultiImageArchive bool + Output string + Quiet bool + ImageNameOrID string +} + +type LoadOptions struct { + Input string + Quiet bool +} + +type InspectOptions struct { + Format string + InspectType string + ImageNameOrID string +} + +type BuildRootfsOptions struct { + ImageNameOrID string + DestDir string +} + +type RemoveImageOptions struct { + ImageNamesOrIDs []string + Force bool + All bool + Prune bool +} + +type GetImageAnnoOptions struct { + ImageNameOrID string +} + +type EngineGlobalConfigurations struct { + AuthFile string + GraphRoot string + RunRoot string +} diff --git a/pkg/imageengine/interface.go b/pkg/imageengine/interface.go new file mode 100644 index 00000000000..4492585d417 --- /dev/null +++ b/pkg/imageengine/interface.go @@ -0,0 +1,57 @@ +// Copyright © 2022 Alibaba Group Holding Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package imageengine + +import ( + "github.com/sealerio/sealer/pkg/imageengine/common" + v1 "github.com/sealerio/sealer/types/api/v1" +) + +type Interface interface { + Build(sealerBuildFlags *common.BuildOptions) (string, error) + + CreateContainer(opts *common.FromOptions) (string, error) + + Mount(opts *common.MountOptions) ([]common.JSONMount, error) + + Copy(opts *common.CopyOptions) error + + Commit(opts *common.CommitOptions) error + + Login(opts *common.LoginOptions) error + + Logout(opts *common.LogoutOptions) error + + Push(opts *common.PushOptions) error + + Pull(opts *common.PullOptions) error + + Images(opts *common.ImagesOptions) error + + Save(opts *common.SaveOptions) error + + Load(opts *common.LoadOptions) error + + Inspect(opts *common.InspectOptions) error + + GetImageAnnotation(opts *common.GetImageAnnoOptions) (map[string]string, error) + + RemoveImage(opts *common.RemoveImageOptions) error + + // TODO the following functions should be upper to image engine + BuildRootfs(opts *common.BuildRootfsOptions) (string, error) + + GetSealerImageExtension(opts *common.GetImageAnnoOptions) (v1.ImageExtension, error) +} diff --git a/test/suites/build/fixtures/lite_build/Kubefile b/test/suites/build/fixtures/lite_build/Kubefile index dd914eb6fc9..d5aab91d0ef 100644 --- a/test/suites/build/fixtures/lite_build/Kubefile +++ b/test/suites/build/fixtures/lite_build/Kubefile @@ -3,7 +3,7 @@ COPY test1 . COPY recommended.yaml manifests COPY test2 . COPY test3 . -RUN wget -O redis.tar.gz http://download.redis.io/releases/redis-5.0.3.tar.gz && tar zxvf redis.tar.gz && rm -f redis.tar.gz -CMD ls -l +#RUN wget -O redis.tar.gz http://download.redis.io/releases/redis-5.0.3.tar.gz && tar zxvf redis.tar.gz && rm -f redis.tar.gz +#CMD ls -l COPY imageList manifests COPY test-plugins.yaml plugins \ No newline at end of file diff --git a/test/testhelper/settings/common.go b/test/testhelper/settings/common.go index 3502c6b10da..8271d95b4e2 100644 --- a/test/testhelper/settings/common.go +++ b/test/testhelper/settings/common.go @@ -78,8 +78,8 @@ var ( AccessKey = os.Getenv("ACCESSKEYID") AccessSecret = os.Getenv("ACCESSKEYSECRET") Region = os.Getenv("RegionID") - TestImageName = "" //default: registry.cn-qingdao.aliyuncs.com/sealer-io/kubernetes:v1.19.8 - TestNydusImageName = "" //default: registry.cn-qingdao.aliyuncs.com/sealer-io/kubernetes-nydus:v1.19.8 + TestImageName = "registry.cn-qingdao.aliyuncs.com/sealer-io/kubernetes:v1.19.8.test" //default: registry.cn-qingdao.aliyuncs.com/sealer-io/kubernetes:v1.19.8 + TestNydusImageName = "" //default: registry.cn-qingdao.aliyuncs.com/sealer-io/kubernetes-nydus:v1.19.8 ) func GetClusterWorkDir(clusterName string) string { diff --git a/types/api/v1/image_types.go b/types/api/v1/image_types.go index 4247b0f37a8..bf8370bdfe7 100644 --- a/types/api/v1/image_types.go +++ b/types/api/v1/image_types.go @@ -15,6 +15,8 @@ package v1 import ( + "strings" + "github.com/opencontainers/go-digest" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -95,13 +97,18 @@ type ImageArg struct { type Platform struct { Architecture string `json:"architecture,omitempty"` OS string `json:"os,omitempty"` - // OSVersion is an optional field specifying the operating system version. - OSVersion string `json:"os_version,omitempty"` // Variant is an optional field specifying a variant of the CPU, for // example `v7` to specify ARMv7 when architecture is `arm`. 
 	Variant string `json:"variant,omitempty"`
 }
 
+// ToString renders the platform as "os/arch/variant", trimming the leading or
+// trailing separator when a component is empty.
+func (p Platform) ToString() string {
+	str := p.OS + "/" + p.Architecture + "/" + p.Variant
+	str = strings.TrimSuffix(str, "/")
+	str = strings.TrimPrefix(str, "/")
+	return str
+}
+
 func init() {
 	SchemeBuilder.Register(&Image{}, &ImageList{})
 }
diff --git a/types/api/v1/sealer_image_extension.go b/types/api/v1/sealer_image_extension.go
new file mode 100644
index 00000000000..d0b587185f2
--- /dev/null
+++ b/types/api/v1/sealer_image_extension.go
@@ -0,0 +1,28 @@
+// Copyright © 2022 Alibaba Group Holding Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+	SealerImageExtension = "sealer.image.extension"
+)
+
+type ImageExtension struct {
+	// ImageType is the sealer image type, e.g. AppImage.
+	ImageType string `json:"image_type"`
+	// CmdSet is the command list used to launch the image.
+	CmdSet []string `json:"cmd_set"`
+	// ArgSet holds the arguments for CmdSet.
+	ArgSet map[string]string `json:"arg_set"`
+}
diff --git a/utils/os/readers.go b/utils/os/readers.go
index 2cd3f95fb94..258fdb813cf 100644
--- a/utils/os/readers.go
+++ b/utils/os/readers.go
@@ -37,8 +37,7 @@ type fileReader struct {
 func (r fileReader) ReadLines() ([]string, error) {
 	var lines []string
 
-	_, err := os.Stat(r.fileName)
-	if err != nil || os.IsNotExist(err) {
+	if _, err := os.Stat(r.fileName); err != nil || os.IsNotExist(err) {
 		return nil, errors.New("no such file")
 	}
 
@@ -63,8 +62,7 @@ func (r fileReader) ReadLines() ([]string, error) {
 }
 
 func (r fileReader) ReadAll() ([]byte, error) {
-	_, err := os.Stat(r.fileName)
-	if err != nil || os.IsNotExist(err) {
+	if _, err := os.Stat(r.fileName); err != nil || os.IsNotExist(err) {
 		return nil, errors.New("no such file")
 	}
 
diff --git a/utils/platform/platform.go b/utils/platform/platform.go
index b80864ef105..321806e0f6f 100644
--- a/utils/platform/platform.go
+++ b/utils/platform/platform.go
@@ -49,7 +49,6 @@ func ParsePlatforms(v string) ([]*v1.Platform, error) {
 func Normalize(platform v1.Platform) v1.Platform {
 	platform.OS = normalizeOS(platform.OS)
 	platform.Architecture, platform.Variant = NormalizeArch(platform.Architecture, platform.Variant)
-	platform.OSVersion = ""
 
 	return platform
 }
diff --git a/vendor/github.com/Azure/go-ansiterm/go.mod b/vendor/github.com/Azure/go-ansiterm/go.mod
new file mode 100644
index 00000000000..965cb8120ec
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-ansiterm
+
+go 1.16
+
+require golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
diff --git a/vendor/github.com/Azure/go-ansiterm/go.sum b/vendor/github.com/Azure/go-ansiterm/go.sum
new file mode 100644
index 00000000000..9f05d9d3edd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go index a6732797263..5599082ae9c 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -10,6 +10,7 @@ import ( "syscall" "github.com/Azure/go-ansiterm" + windows "golang.org/x/sys/windows" ) // Windows keyboard constants @@ -162,15 +163,28 @@ func ensureInRange(n int16, min int16, max int16) int16 { func GetStdFile(nFile int) (*os.File, uintptr) { var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: + + // syscall uses negative numbers + // windows package uses very big uint32 + // Keep these switches split so we don't have to convert ints too much. + switch uint32(nFile) { + case windows.STD_INPUT_HANDLE: file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: + case windows.STD_OUTPUT_HANDLE: file = os.Stdout - case syscall.STD_ERROR_HANDLE: + case windows.STD_ERROR_HANDLE: file = os.Stderr default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } } fd, err := syscall.GetStdHandle(nFile) diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore index 0cd3800377d..cd11be96530 100644 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -1,5 +1,2 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 8b8afc4f0e0..00000000000 --- a/vendor/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip -install: - - go install ./... - - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE index 6efcfd0ce55..f621b01196c 100644 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -1,3 +1 @@ -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) - +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848d331..00000000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 7c1b37ecc7a..cc13f8667fb 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,46 +1,36 @@ -## TOML parser and encoder for Go with reflection - TOML stands for Tom's Obvious, Minimal Language. 
 This Go package provides a reflection interface similar to Go's standard
 library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
+packages.
 
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
 
-Documentation: https://godoc.org/github.com/BurntSushi/toml
+Documentation: https://godocs.io/github.com/BurntSushi/toml
 
-Installation:
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
 
-```bash
-go get github.com/BurntSushi/toml
-```
+This library requires Go 1.13 or newer; install it with:
 
-Try the toml validator:
+    % go get github.com/BurntSushi/toml@latest
 
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
+It also comes with a TOML validator CLI tool:
 
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
+    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+    % tomlv some-toml-file.toml
 
 ### Testing
+This package passes all tests in [toml-test] for both the decoder and the
+encoder.
 
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
+[toml-test]: https://github.com/BurntSushi/toml-test
 
 ### Examples
+This package works similarly to how the Go standard library handles XML and
+JSON. Namely, data is loaded into Go values via reflection.
 
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
+For the simplest example, consider some TOML file as just a list of keys and
+values:
 
 ```toml
 Age = 25
@@ -54,11 +44,11 @@ Which could be defined in Go as:
 
 ```go
 type Config struct {
-  Age int
-  Cats []string
-  Pi float64
-  Perfection []int
-  DOB time.Time // requires `import time`
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time // requires `import time`
 }
 ```
 
@@ -66,9 +56,8 @@ And then decoded with:
 
 ```go
 var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
-  // handle error
-}
+_, err := toml.Decode(tomlData, &conf)
+// handle error
 ```
 
 You can also use struct tags if your struct field name doesn't map to a TOML
@@ -80,12 +69,14 @@ some_key_NAME = "wat"
 ```
 
 ```go
 type TOML struct {
-  ObscureKey string `toml:"some_key_NAME"`
+	ObscureKey string `toml:"some_key_NAME"`
 }
 ```
 
-### Using the `encoding.TextUnmarshaler` interface
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
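+
+As an illustrative sketch (this struct is hypothetical, not part of the
+package), only `Public` below is decoded; `private` is silently skipped:
+
+```go
+type Config struct {
+	Public  string // exported: filled from the TOML key "Public"
+	private string // unexported: ignored by the decoder
+}
+```
+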
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
 
 Here's an example that automatically parses duration strings into
 `time.Duration` values:
 
@@ -103,19 +94,19 @@ Which can be decoded with:
 
 ```go
 type song struct {
-  Name string
-  Duration duration
+	Name     string
+	Duration duration
 }
 type songs struct {
-  Song []song
+	Song []song
 }
 var favorites songs
 if _, err := toml.Decode(blob, &favorites); err != nil {
-  log.Fatal(err)
+	log.Fatal(err)
 }
 
 for _, s := range favorites.Song {
-  fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
 }
 ```
 
@@ -134,8 +125,10 @@ func (d *duration) UnmarshalText(text []byte) error {
 }
 ```
 
-### More complex usage
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
 
+### More complex usage
 Here's an example of how to load the example from the official spec page:
 
 ```toml
@@ -180,23 +173,23 @@ And the corresponding Go types are:
 
 ```go
 type tomlConfig struct {
-  Title string
-  Owner ownerInfo
-  DB database `toml:"database"`
+	Title   string
+	Owner   ownerInfo
+	DB      database `toml:"database"`
 	Servers map[string]server
 	Clients clients
 }
 
 type ownerInfo struct {
 	Name string
-  Org string `toml:"organization"`
-  Bio string
-  DOB time.Time
+	Org  string `toml:"organization"`
+	Bio  string
+	DOB  time.Time
 }
 
 type database struct {
-  Server string
-  Ports []int
+	Server  string
+	Ports   []int
 	ConnMax int `toml:"connection_max"`
 	Enabled bool
 }
@@ -207,7 +200,7 @@ type server struct {
 }
 
 type clients struct {
-  Data [][]interface{}
+	Data  [][]interface{}
 	Hosts []string
 }
 ```
@@ -215,4 +208,4 @@ type clients struct {
 Note that a case insensitive match will be tried if an exact match can't be
 found.
 
-A working example of the above can be found in `_examples/example.{go,toml}`.
+A working example of the above can be found in `_example/example.{go,toml}`.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index b0fd51d5b6e..e24f0c5d5c0 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -1,19 +1,16 @@
 package toml
 
 import (
+	"encoding"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"math"
+	"os"
 	"reflect"
 	"strings"
-	"time"
 )
 
-func e(format string, args ...interface{}) error {
-	return fmt.Errorf("toml: "+format, args...)
-}
-
 // Unmarshaler is the interface implemented by objects that can unmarshal a
 // TOML description of themselves.
 type Unmarshaler interface {
@@ -27,29 +24,27 @@ func Unmarshal(p []byte, v interface{}) error {
 }
 
 // Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
 //
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
 //
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
 //
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection.
They can be useful when you don't know the exact type +// of TOML data until runtime. type Primitive struct { undecoded interface{} context Key } -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = 9007199254740991 // 2^53-1 +) // PrimitiveDecode is just like the other `Decode*` functions, except it // decodes a TOML value that has already been parsed. Valid primitive values @@ -68,79 +63,117 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { return md.unify(primValue.undecoded, rvalue(v)) } -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. +// Decoder decodes TOML data. // -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) +// TOML tables correspond to Go structs or maps (dealer's choice – they can be +// used interchangeably). // -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. +// TOML table arrays correspond to either a slice of structs or a slice of maps. // -// TOML datetimes correspond to Go `time.Time` values. +// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed +// in the local timezone. // -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. // -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// time duration strings. // // Key mapping // -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. // -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. 
+// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. // -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v)) } if rv.IsNil() { - return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v)) } - p, err := parse(data) + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, e("cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) if err != nil { return MetaData{}, err } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, indirect(rv)) -} -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) + p, err := parse(string(data)) if err != nil { return MetaData{}, err } - return Decode(string(bs), v) + + md := MetaData{ + mapping: p.mapping, + types: p.types, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + } + return md, md.unify(p.mapping, rv) } -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) +// Decode the TOML data in to the pointer v. +// +// See the documentation on Decoder for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at path and decode it for you. 
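+//
+// A minimal usage sketch (the file name and target struct here are
+// illustrative, not part of the API):
+//
+//	var conf struct{ Port int }
+//	_, err := DecodeFile("config.toml", &conf)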
+func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) if err != nil { return MetaData{}, err } - return Decode(string(bs), v) + defer fp.Close() + return NewDecoder(fp).Decode(v) } // unify performs a sort of type unification based on the structure of `rv`, @@ -149,8 +182,8 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { // Save the undecoded data and the key context into the primitive // value. @@ -170,25 +203,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { } } - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { + if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) } - // BUG(burntsushi) + // TODO: // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). k := rv.Kind() @@ -223,9 +248,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: + case reflect.Float32, reflect.Float64: return md.unifyFloat64(data, rv) } return e("unsupported type %s", rv.Kind()) @@ -259,17 +282,17 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { for _, i := range f.index { subv = indirect(subv.Field(i)) } + if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true + md.decoded[md.context.add(key).String()] = struct{}{} md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { + err := md.unify(datum, subv) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { - // Bad user! No soup for you! 
- return e("cannot write unexported field %s.%s", - rv.Type().String(), f.name) + return e("cannot write unexported field %s.%s", rv.Type().String(), f.name) } } } @@ -277,27 +300,33 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { } func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + if k := rv.Type().Key().Kind(); k != reflect.String { + return fmt.Errorf( + "toml: cannot decode to a map with non-string key type (%s in %q)", + k, rv.Type()) + } + tmap, ok := mapping.(map[string]interface{}) if !ok { if tmap == nil { return nil } - return badtype("map", mapping) + return md.badtype("map", mapping) } if rv.IsNil() { rv.Set(reflect.MakeMap(rv.Type())) } for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true + md.decoded[md.context.add(k).String()] = struct{}{} md.context = append(md.context, k) - rvkey := indirect(reflect.New(rv.Type().Key())) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) if err := md.unify(v, rvval); err != nil { return err } md.context = md.context[0 : len(md.context)-1] + rvkey := indirect(reflect.New(rv.Type().Key())) rvkey.SetString(k) rv.SetMapIndex(rvkey, rvval) } @@ -310,12 +339,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { if !datav.IsValid() { return nil } - return badtype("slice", data) + return md.badtype("slice", data) } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) + if l := datav.Len(); l != rv.Len() { + return e("expected array length %d; got TOML array of length %d", rv.Len(), l) } return md.unifySliceArray(datav, rv) } @@ -326,7 +353,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { if !datav.IsValid() { return nil } - return badtype("slice", data) + return md.badtype("slice", data) } n := datav.Len() if rv.IsNil() || rv.Cap() < n { @@ -337,37 +364,31 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { return err } } return nil } -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { if s, ok := data.(string); ok { rv.SetString(s) return nil } - return badtype("string", data) + return md.badtype("string", data) } func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { if num, ok := data.(float64); ok { switch rv.Kind() { case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return e("value %f is out of range for float32", num) + } fallthrough case reflect.Float64: rv.SetFloat(num) @@ -376,7 +397,26 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } return nil } - return badtype("float", data) + + if num, ok := data.(int64); ok { + switch rv.Kind() { + case reflect.Float32: + if num < -maxSafeFloat32Int || num > maxSafeFloat32Int { + return e("value %d is out of range for float32", num) + } + fallthrough + case 
reflect.Float64: + if num < -maxSafeFloat64Int || num > maxSafeFloat64Int { + return e("value %d is out of range for float64", num) + } + rv.SetFloat(float64(num)) + default: + panic("bug") + } + return nil + } + + return md.badtype("float", data) } func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { @@ -423,7 +463,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { } return nil } - return badtype("integer", data) + return md.badtype("integer", data) } func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { @@ -431,7 +471,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { rv.SetBool(b) return nil } - return badtype("boolean", data) + return md.badtype("boolean", data) } func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { @@ -439,9 +479,15 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { var s string switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) case TextMarshaler: text, err := sdata.MarshalText() if err != nil { @@ -459,7 +505,7 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { case float64: s = fmt.Sprintf("%f", sdata) default: - return badtype("primitive (string-like)", data) + return md.badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { return err @@ -467,22 +513,27 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { return nil } +func (md *MetaData) badtype(dst string, data interface{}) error { + return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst) +} + // rvalue returns a reflect.Value of `v`. All pointers are resolved. func rvalue(v interface{}) reflect.Value { return indirect(reflect.ValueOf(v)) } // indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. // -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanSet() { pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { + if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { return pv } } @@ -498,12 +549,12 @@ func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } - if _, ok := rv.Interface().(TextUnmarshaler); ok { + if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { return true } return false } -func badtype(expected string, data interface{}) error { - return e("cannot load TOML value of type %T into a Go %s", data, expected) +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) 
 }
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
new file mode 100644
index 00000000000..eddfb641b86
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_go116.go
@@ -0,0 +1,19 @@
+//go:build go1.16
+// +build go1.16
+
+package toml
+
+import (
+	"io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 00000000000..c6af3f239dd
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,21 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// Deprecated: use encoding.TextMarshaler
+type TextMarshaler encoding.TextMarshaler
+
+// Deprecated: use encoding.TextUnmarshaler
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// Deprecated: use MetaData.PrimitiveDecode.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]struct{})}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index b371f396edc..099c4a77d2d 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -1,27 +1,13 @@
 /*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
+Package toml implements decoding and encoding of TOML files.
 
-The specification implemented: https://github.com/toml-lang/toml
+This package supports TOML v1.0.0, as listed on https://toml.io
 
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
 
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify whether a TOML document is valid. It can also be used
+to print the type of each key.
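+
+A minimal decoding sketch (the TOML literal and target struct are
+illustrative):
+
+	var conf struct{ Port int }
+	_, err := toml.Decode("Port = 8080", &conf) // handle err in real code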
 */
 package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index d905c21a246..dee4e6d3196 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,57 +2,106 @@ package toml
 
 import (
 	"bufio"
+	"encoding"
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"reflect"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/BurntSushi/toml/internal"
 )
 
 type tomlEncodeError struct{ error }
 
 var (
-	errArrayMixedElementTypes = errors.New(
-		"toml: cannot encode array with mixed element types")
-	errArrayNilElement = errors.New(
-		"toml: cannot encode array with nil element")
-	errNonString = errors.New(
-		"toml: cannot encode a map with non-string key type")
-	errAnonNonStruct = errors.New(
-		"toml: cannot encode an anonymous field that is not a struct")
-	errArrayNoTable = errors.New(
-		"toml: TOML array element cannot contain a table")
-	errNoKey = errors.New(
-		"toml: top-level values must be Go maps or structs")
-	errAnything = errors.New("") // used in testing
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
 )
 
-var quotedReplacer = strings.NewReplacer(
-	"\t", "\\t",
-	"\n", "\\n",
-	"\r", "\\r",
+var dblQuotedReplacer = strings.NewReplacer(
 	"\"", "\\\"",
 	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
 )
 
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions.
+//
+// The toml.Marshaler and encoding.TextMarshaler interfaces are supported for
+// encoding the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
 //
-// The indentation level can be controlled with the Indent field.
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
 type Encoder struct {
-	// A single indentation level. By default it is two spaces.
+	// String to use for a single indentation level; default is two spaces.
 	Indent string
 
-	// hasWritten is whether we have written any output to w yet.
-	hasWritten bool
 	w *bufio.Writer
+	hasWritten bool // written any output to w yet?
 }
 
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
+// NewEncoder creates a new Encoder.
 func NewEncoder(w io.Writer) *Encoder {
 	return &Encoder{
 		w: bufio.NewWriter(w),
@@ -60,29 +109,10 @@ func NewEncoder(w io.Writer) *Encoder {
 	}
 }
 
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
 //
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
 func (enc *Encoder) Encode(v interface{}) error {
 	rv := eindirect(reflect.ValueOf(v))
 	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@@ -106,13 +136,18 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
 }
 
 func (enc *Encoder) encode(key Key, rv reflect.Value) {
-	// Special case. Time needs to be in ISO8601 format.
-	// Special case. If we can marshal the type to text, then we used that.
-	// Basically, this prevents the encoder for handling these types as
-	// generic structs (or whatever the underlying type of a TextMarshaler is).
-	switch rv.Interface().(type) {
-	case time.Time, TextMarshaler:
-		enc.keyEqElement(key, rv)
+	// Special case: time needs to be in ISO8601 format.
+	//
+	// Special case: if we can marshal the type to text, then we use that. This
+	// prevents the encoder from handling these types as generic structs (or
+	// whatever the underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler, Marshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous after implemented.
+ case Primitive: + enc.encode(key, reflect.ValueOf(t.undecoded)) return } @@ -123,12 +158,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) { reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) + enc.writeKeyValue(key, rv, false) case reflect.Array, reflect.Slice: if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { enc.eArrayOfTables(key, rv) } else { - enc.keyEqElement(key, rv) + enc.writeKeyValue(key, rv, false) } case reflect.Interface: if rv.IsNil() { @@ -148,55 +183,88 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) { case reflect.Struct: enc.eTable(key, rv) default: - panic(e("unsupported type for key '%s': %s", key, k)) + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) } } -// eElement encodes any value that can be an array element (primitives and -// arrays). +// eElement encodes any value that can be an array element. func (enc *Encoder) eElement(rv reflect.Value) { switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. 
- if s, err := v.MarshalText(); err != nil { + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { encPanic(err) - } else { - enc.writeQuoted(string(s)) } + enc.writeQuoted(string(s)) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + enc.writeQuoted(string(s)) return } + switch rv.Kind() { + case reflect.String: + enc.writeQuoted(rv.String()) case reflect.Bool: enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: enc.wf(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) case reflect.Interface: enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) default: - panic(e("unexpected primitive type: %s", rv.Kind())) + encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) } } -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. func floatAddDecimal(fstr string) string { if !strings.Contains(fstr, ".") { return fstr + ".0" @@ -205,7 +273,7 @@ func floatAddDecimal(fstr string) string { } func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { @@ -230,40 +298,39 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { if isNil(trv) { continue } - panicIfInvalidKey(key) enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.wf("%s[[%s]]", enc.indentStr(key), key) enc.newline() - enc.eMapOrStruct(key, trv) + enc.eMapOrStruct(key, trv, false) } } func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) if len(key) == 1 { // Output an extra newline between top-level tables. // (The newline isn't written if nothing else has been written though.) 
enc.newline() } if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.wf("%s[%s]", enc.indentStr(key), key) enc.newline() } - enc.eMapOrStruct(key, rv) + enc.eMapOrStruct(key, rv, false) } -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { switch rv := eindirect(rv); rv.Kind() { case reflect.Map: - enc.eMap(key, rv) + enc.eMap(key, rv, inline) case reflect.Struct: - enc.eStruct(key, rv) + enc.eStruct(key, rv, inline) default: + // Should never happen? panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) } } -func (enc *Encoder) eMap(key Key, rv reflect.Value) { +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { rt := rv.Type() if rt.Key().Kind() != reflect.String { encPanic(errNonString) @@ -274,114 +341,159 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) { var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) } } - var writeMapKeys = func(mapKeys []string) { + var writeMapKeys = func(mapKeys []string, trailC bool) { sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. + for i, mapKey := range mapKeys { + val := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(val) { continue } - enc.encode(key.add(mapKey), mrv) + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } } } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } } -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that + // a field that creates a new table then all keys under it will be in that // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) addFields = func(rt reflect.Type, rv reflect.Value, start []int) { for i := 0; i < rt.NumField(); i++ { f := rt.Field(i) - // skip unexported fields - if f.PkgPath != "" && !f.Anonymous { + if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. continue } + frv := rv.Field(i) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. 
if f.Anonymous { t := f.Type switch t.Kind() { case reflect.Struct: - // Treat anonymous struct fields with - // tag names as though they are not - // anonymous, like encoding/json does. if getOptions(f.Tag).name == "" { - addFields(t, frv, f.Index) + addFields(t, frv, append(start, f.Index...)) continue } case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && - getOptions(f.Tag).name == "" { + if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), f.Index) + addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) } continue } - // Fall through to the normal field encoding logic below - // for non-struct anonymous fields. } } - if typeIsHash(tomlTypeOfGo(frv)) { + if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + if is32Bit { + copyStart := make([]int, len(start)) + copy(copyStart, start) + fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } } } } addFields(rt, rv, nil) - var writeFields = func(fields [][]int) { + writeFields := func(fields [][]int) { for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + if isNil(fieldVal) { /// Don't write anything for nil fields. continue } - opts := getOptions(sft.Tag) + opts := getOptions(fieldType.Tag) if opts.skip { continue } - keyName := sft.Name + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && isEmpty(sf) { + if opts.omitempty && isEmpty(fieldVal) { continue } - if opts.omitzero && isZero(sf) { + if opts.omitzero && isZero(fieldVal) { continue } - enc.encode(key.add(keyName), sf) + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } } } + + if inline { + enc.wf("{") + } writeFields(fieldsDirect) writeFields(fieldsSub) + if inline { + enc.wf("}") + } } -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. 
func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil @@ -408,19 +520,43 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Map: return tomlHash case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: + if _, ok := rv.Interface().(time.Time); ok { return tomlDatetime - case TextMarshaler: + } + if isMarshaler(rv) { return tomlString - default: - return tomlHash } + return tomlHash default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) + if isMarshaler(rv) { + return tomlString + } + + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") } } +func isMarshaler(rv reflect.Value) bool { + switch rv.Interface().(type) { + case encoding.TextMarshaler: + return true + case Marshaler: + return true + } + + // Someone used a pointer receiver: we can make it work for pointer values. + if rv.CanAddr() { + if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok { + return true + } + if _, ok := rv.Addr().Interface().(Marshaler); ok { + return true + } + } + return false +} + // tomlArrayType returns the element type of a TOML array. The type returned // may be nil if it cannot be determined (e.g., a nil slice or a zero length // slize). This function may also panic if it finds a type that cannot be @@ -430,29 +566,18 @@ func tomlArrayType(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { return nil } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } + /// Don't allow nil. rvlen := rv.Len() for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: + if tomlTypeOfGo(rv.Index(i)) == nil { encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) } } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. - if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } + + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) } return firstType } @@ -511,18 +636,32 @@ func (enc *Encoder) newline() { } } -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌────────────────────┐ +// │ ┌───┐ ┌─────┐│ +// v v v v vv +// key = {k = v, k2 = v2} +// +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { if len(key) == 0 { encPanic(errNoKey) } - panicIfInvalidKey(key) enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) - enc.newline() + if !inline { + enc.newline() + } } func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { encPanic(err) } enc.hasWritten = true @@ -553,16 +692,3 @@ func isNil(rv reflect.Value) bool { return false } } - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. 
Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd6002..00000000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d0469..00000000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 00000000000..36edc46554e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,229 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax. +// +// For example invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using ErrorWithLocation(): +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. +// +// At line 4, column 2-7: +// +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ +// +// Furthermore, the ErrorWithUsage() can be used to print the above with some +// more detailed usage guidance: +// +// toml: error: newlines not allowed within inline tables +// +// At line 1, column 18: +// +// 1 | x = [{ key = 42 # +// ^ +// +// Error help: +// +// Inline tables must always be on a single line: +// +// table = {key = 42, second = 43} +// +// It is invalid to split them over multiple lines like so: +// +// # INVALID +// table = { +// key = 42, +// second = 43 +// } +// +// Use regular for this: +// +// [table] +// key = 42 +// second = 43 +type ParseError struct { + Message string // Short technical message. + Usage string // Longer message with usage guidance; may be blank. + Position Position // Position of the error + LastKey string // Last parsed key, may be blank. + Line int // Line the error occurred. Deprecated: use Position. + + err error + input string +} + +// Position of an error. +type Position struct { + Line int // Line number, starting at 1. 
+ Start int // Start of error, as byte offset starting at 0. + Len int // Length in bytes. +} + +func (pe ParseError) Error() string { + msg := pe.Message + if msg == "" { // Error from errorf() + msg = pe.err.Error() + } + + if pe.LastKey == "" { + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + } + return fmt.Sprintf("toml: line %d (last key %q): %s", + pe.Position.Line, pe.LastKey, msg) +} + +// ErrorWithPosition() returns the error with detailed location context. +// +// See the documentation on ParseError. +func (pe ParseError) ErrorWithPosition() string { + if pe.input == "" { // Should never happen, but just in case. + return pe.Error() + } + + var ( + lines = strings.Split(pe.input, "\n") + col = pe.column(lines) + b = new(strings.Builder) + ) + + msg := pe.Message + if msg == "" { + msg = pe.err.Error() + } + + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + } + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage() returns the error with detailed location context and usage +// guidance. +// +// See the documentation on ParseError. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + return m + "Error help:\n\n " + + strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") + + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case. 
+ col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. + +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular tables for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod new file mode 100644 index 00000000000..82989481df7 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/go.mod @@ -0,0 +1,3 @@ +module github.com/BurntSushi/toml + +go 1.16 diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go new file mode 100644 index 00000000000..022f15bc2b8 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/internal/tz.go @@ -0,0 +1,36 @@ +package internal + +import "time" + +// Timezones used for local datetime, date, and time TOML types. +// +// The exact way times and dates without a timezone should be interpreted is not +// well-defined in the TOML specification and left to the implementation. These +// default to the current local timezone offset of the computer, but this can be +// changed by setting these variables before decoding. 
+// +// TODO: +// Ideally we'd like to offer people the ability to configure the used timezone +// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit +// tricky: the reason we use three different variables for this is to support +// round-tripping – without these specific TZ names we wouldn't know which +// format to use. +// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred from in e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. +var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index e0a742a8870..63ef20f4745 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -2,6 +2,8 @@ package toml import ( "fmt" + "reflect" + "runtime" "strings" "unicode" "unicode/utf8" @@ -29,33 +31,20 @@ const ( itemArrayTableStart itemArrayTableEnd itemKeyStart + itemKeyEnd itemCommentStart itemInlineTableStart itemInlineTableEnd ) -const ( - eof = 0 - comma = ',' - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' - inlineTableStart = '{' - inlineTableEnd = '}' -) +const eof = 0 type stateFn func(lx *lexer) stateFn +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + type lexer struct { input string start int @@ -64,26 +53,26 @@ type lexer struct { state stateFn items chan item - // Allow for backing up up to three runes. - // This is necessary because TOML contains 3-rune tokens (""" and '''). - prevWidths [3]int - nprev int // how many of prevWidths are in use - // If we emit an eof, we can still back up, but it is not OK to call - // next again. - atEOF bool + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. 
stack []stateFn } type item struct { - typ itemType - val string - line int + typ itemType + val string + err error + pos Position } func (lx *lexer) nextItem() item { @@ -93,6 +82,7 @@ func (lx *lexer) nextItem() item { return item default: lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) } } } @@ -101,9 +91,9 @@ func lex(input string) *lexer { lx := &lexer{ input: input, state: lexTop, - line: 1, items: make(chan item, 10), stack: make([]stateFn, 0, 10), + line: 1, } return lx } @@ -125,19 +115,31 @@ func (lx *lexer) current() string { return lx.input[lx.start:lx.pos] } +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} lx.start = lx.pos } func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} lx.start = lx.pos } func (lx *lexer) next() (r rune) { if lx.atEOF { - panic("next called after EOF") + panic("BUG in lexer: next called after EOF") } if lx.pos >= len(lx.input) { lx.atEOF = true @@ -147,12 +149,25 @@ func (lx *lexer) next() (r rune) { if lx.input[lx.pos] == '\n' { lx.line++ } + lx.prevWidths[3] = lx.prevWidths[2] lx.prevWidths[2] = lx.prevWidths[1] lx.prevWidths[1] = lx.prevWidths[0] - if lx.nprev < 3 { + if lx.nprev < 4 { lx.nprev++ } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + lx.prevWidths[0] = w lx.pos += w return r @@ -163,19 +178,21 @@ func (lx *lexer) ignore() { lx.start = lx.pos } -// backup steps back one rune. Can be called only twice between calls to next. +// backup steps back one rune. Can be called 4 times between calls to next. func (lx *lexer) backup() { if lx.atEOF { lx.atEOF = false return } if lx.nprev < 1 { - panic("backed up too far") + panic("BUG in lexer: backed up too far") } w := lx.prevWidths[0] lx.prevWidths[0] = lx.prevWidths[1] lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] lx.nprev-- + lx.pos -= w if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { lx.line-- @@ -211,18 +228,58 @@ func (lx *lexer) skip(pred func(rune) bool) { } } -// errorf stops all lexing by emitting an error and returning `nil`. +// error stops all lexing by emitting an error and returning `nil`. +// // Note that any value that is a character is escaped if it's a special // character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorPrevLine is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. 
+func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} return nil } +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + // lexTop consumes elements at the top level of TOML data. func lexTop(lx *lexer) stateFn { r := lx.next() @@ -230,10 +287,10 @@ func lexTop(lx *lexer) stateFn { return lexSkip(lx, lexTop) } switch r { - case commentStart: + case '#': lx.push(lexTop) return lexCommentStart - case tableStart: + case '[': return lexTableStart case eof: if lx.pos > lx.start { @@ -256,7 +313,7 @@ func lexTop(lx *lexer) stateFn { func lexTopEnd(lx *lexer) stateFn { r := lx.next() switch { - case r == commentStart: + case r == '#': // a comment will read to a newline for us. lx.push(lexTop) return lexCommentStart @@ -269,8 +326,9 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf("expected a top-level item to end with a newline, "+ - "comment, or EOF, but got %q instead", r) + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) } // lexTable lexes the beginning of a table. Namely, it makes sure that @@ -279,7 +337,7 @@ func lexTopEnd(lx *lexer) stateFn { // It also handles the case that this is an item in an array of tables. // e.g., '[[name]]'. 
func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { + if lx.peek() == '[' { lx.next() lx.emit(itemArrayTableStart) lx.push(lexArrayTableEnd) @@ -296,9 +354,8 @@ func lexTableEnd(lx *lexer) stateFn { } func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("expected end of table array name delimiter %q, "+ - "but got %q instead", arrayTableEnd, r) + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) } lx.emit(itemArrayTableEnd) return lexTopEnd @@ -307,33 +364,20 @@ func lexArrayTableEnd(lx *lexer) stateFn { func lexTableNameStart(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("unexpected end of table name " + - "(table names cannot be empty)") - case r == tableSep: - return lx.errorf("unexpected table separator " + - "(table names cannot be empty)") - case r == stringStart || r == rawStringStart: + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': lx.ignore() lx.push(lexTableNameEnd) - return lexValue // reuse string lexing + return lexQuotedName default: - return lexBareTableName + lx.push(lexTableNameEnd) + return lexBareName } } -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. -func lexBareTableName(lx *lexer) stateFn { - r := lx.next() - if isBareKeyChar(r) { - return lexBareTableName - } - lx.backup() - lx.emit(itemText) - return lexTableNameEnd -} - // lexTableNameEnd reads the end of a piece of a table name, optionally // consuming whitespace. func lexTableNameEnd(lx *lexer) stateFn { @@ -341,69 +385,107 @@ func lexTableNameEnd(lx *lexer) stateFn { switch r := lx.next(); { case isWhitespace(r): return lexTableNameEnd - case r == tableSep: + case r == '.': lx.ignore() return lexTableNameStart - case r == tableEnd: + case r == ']': return lx.pop() default: - return lx.errorf("expected '.' or ']' to end table name, "+ - "but got %q instead", r) + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) } } -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexQuotedName lexes one quoted part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only '"a"' inside '"a".b'. 
+func lexQuotedName(lx *lexer) stateFn { + r := lx.next() switch { - case r == keySep: - return lx.errorf("unexpected key separator %q", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == '"': + lx.ignore() // ignore the '"' + return lexString + case r == '\'': + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == '"' || r == '\'': lx.ignore() + fallthrough + default: // Bare key lx.emit(itemKeyStart) - return lexBareKey + return lexKeyNameStart } } -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. -func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName default: - return lx.errorf("bare keys cannot contain %q", r) + lx.push(lexKeyEnd) + return lexBareName } } // lexKeyEnd consumes the end of a key and trims whitespace (up to the key // separator). func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) case isWhitespace(r): return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) default: - return lx.errorf("expected key separator %q, but got %q instead", - keySep, r) + return lx.errorf("expected '.' 
or '=', but got %q instead", r) } } @@ -422,17 +504,17 @@ func lexValue(lx *lexer) stateFn { return lexNumberOrDateStart } switch r { - case arrayStart: + case '[': lx.ignore() lx.emit(itemArray) return lexArrayValue - case inlineTableStart: + case '{': lx.ignore() lx.emit(itemInlineTableStart) return lexInlineTableValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { + case '"': + if lx.accept('"') { + if lx.accept('"') { lx.ignore() // Ignore """ return lexMultilineString } @@ -440,9 +522,9 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the '"' return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { lx.ignore() // Ignore """ return lexMultilineRawString } @@ -450,10 +532,15 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the "'" return lexRawString - case '+', '-': - return lexNumberStart case '.': // special error case, be kind to users return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart } if unicode.IsLetter(r) { // Be permissive here; lexBool will give a nice error if the @@ -463,6 +550,9 @@ func lexValue(lx *lexer) stateFn { lx.backup() return lexBool } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } return lx.errorf("expected value but found %q instead", r) } @@ -473,14 +563,12 @@ func lexArrayValue(lx *lexer) stateFn { switch { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValue) - case r == commentStart: + case r == '#': lx.push(lexArrayValue) return lexCommentStart - case r == comma: + case r == ',': return lx.errorf("unexpected comma") - case r == arrayEnd: - // NOTE(caleb): The spec isn't clear about whether you can have - // a trailing comma or not, so we'll allow it. + case r == ']': return lexArrayEnd } @@ -493,23 +581,20 @@ func lexArrayValue(lx *lexer) stateFn { // the next value (or the end of the array): it ignores whitespace and newlines // and expects either a ',' or a ']'. func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { + switch r := lx.next(); { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: + case r == '#': lx.push(lexArrayValueEnd) return lexCommentStart - case r == comma: + case r == ',': lx.ignore() return lexArrayValue // move on to the next value - case r == arrayEnd: + case r == ']': return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) } - return lx.errorf( - "expected a comma or array terminator %q, but got %q instead", - arrayEnd, r, - ) } // lexArrayEnd finishes the lexing of an array. 
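The value-lexing rework in the hunks above adds dedicated states for base-prefixed integers and for the special inf/nan floats. A minimal sketch of how this surfaces through the library's public Decode API; the document string and struct below are illustrative only, not part of this patch:

    package main

    import (
    	"fmt"

    	toml "github.com/BurntSushi/toml"
    )

    func main() {
    	// Each literal below is routed through the new
    	// lexDecimalNumberStart / lexBaseNumberOrDate states.
    	doc := "bin = 0b1010\noct = 0o755\nhex = 0xBEEF\nneg = -inf"
    	var v struct {
    		Bin, Oct, Hex int64
    		Neg           float64
    	}
    	if _, err := toml.Decode(doc, &v); err != nil {
    		fmt.Println("decode error:", err)
    		return
    	}
    	fmt.Printf("%+v\n", v) // e.g. {Bin:10 Oct:493 Hex:48879 Neg:-Inf}
    }
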
@@ -528,13 +613,13 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): - return lx.errorf("newlines not allowed within inline tables") + return lx.errorPrevLine(errLexInlineTableNL{}) - case r == commentStart: + case r == '#': lx.push(lexInlineTableValue) return lexCommentStart - case r == comma: + case r == ',': return lx.errorf("unexpected comma") - case r == inlineTableEnd: + case r == '}': return lexInlineTableEnd } lx.backup() @@ -546,23 +631,33 @@ func lexInlineTableValue(lx *lexer) stateFn { // key/value pair and the next pair (or the end of the table): // it ignores whitespace and expects either a ',' or a '}'. func lexInlineTableValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { + switch r := lx.next(); { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': lx.push(lexInlineTableValueEnd) return lexCommentStart - case r == comma: + case r == ',': lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } return lexInlineTableValue - case r == inlineTableEnd: + case r == '}': return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) } - return lx.errorf("expected a comma or an inline table terminator %q, "+ - "but got %q instead", inlineTableEnd, r) +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" } // lexInlineTableEnd finishes the lexing of an inline table. @@ -579,13 +674,13 @@ func lexString(lx *lexer) stateFn { r := lx.next() switch { case r == eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected '"'`) case isNL(r): - return lx.errorf("strings cannot contain newlines") + return lx.errorPrevLine(errLexStringNL{}) case r == '\\': lx.push(lexString) return lexStringEscape - case r == stringEnd: + case r == '"': lx.backup() lx.emit(itemString) lx.next() @@ -598,19 +693,37 @@ func lexString(lx *lexer) stateFn { // lexMultilineString consumes the inner contents of a string. It assumes that // the beginning '"""' has already been consumed and ignored. func lexMultilineString(lx *lexer) stateFn { - switch lx.next() { + r := lx.next() + switch r { + default: + return lexMultilineString case eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected '"""'`) case '\\': return lexMultilineStringEscape - case stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), `"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) - lx.next() + lx.next() /// Read over """ again and discard it. 
lx.next() lx.next() lx.ignore() @@ -618,8 +731,8 @@ func lexMultilineString(lx *lexer) stateFn { } lx.backup() } + return lexMultilineString } - return lexMultilineString } // lexRawString consumes a raw string. Nothing can be escaped in such a string. @@ -627,35 +740,54 @@ func lexMultilineString(lx *lexer) stateFn { func lexRawString(lx *lexer) stateFn { r := lx.next() switch { + default: + return lexRawString case r == eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected "'"`) case isNL(r): - return lx.errorf("strings cannot contain newlines") - case r == rawStringEnd: + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': lx.backup() lx.emit(itemRawString) lx.next() lx.ignore() return lx.pop() } - return lexRawString } // lexMultilineRawString consumes a raw string. Nothing can be escaped in such // a string. It assumes that the beginning "'''" has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { - switch lx.next() { + r := lx.next() + switch r { + default: + return lexMultilineRawString case eof: - return lx.errorf("unexpected EOF") - case rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. lx.backup() lx.backup() lx.emit(itemRawMultilineString) - lx.next() + lx.next() /// Read over ''' again and discard it. lx.next() lx.next() lx.ignore() @@ -663,8 +795,8 @@ func lexMultilineRawString(lx *lexer) stateFn { } lx.backup() } + return lexMultilineRawString } - return lexMultilineRawString } // lexMultilineStringEscape consumes an escaped character. It assumes that the @@ -694,6 +826,10 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '"': fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. 
+ fallthrough case '\\': return lx.pop() case 'u': @@ -701,9 +837,7 @@ func lexStringEscape(lx *lexer) stateFn { case 'U': return lexLongUnicodeEscape } - return lx.errorf("invalid escape character %q; only the following "+ - "escape characters are allowed: "+ - `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) + return lx.error(errLexEscape{r}) } func lexShortUnicodeEscape(lx *lexer) stateFn { @@ -711,8 +845,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn { for i := 0; i < 4; i++ { r = lx.next() if !isHexadecimal(r) { - return lx.errorf(`expected four hexadecimal digits after '\u', `+ - "but got %q instead", lx.current()) + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) } } return lx.pop() @@ -723,28 +858,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { for i := 0; i < 8; i++ { r = lx.next() if !isHexadecimal(r) { - return lx.errorf(`expected eight hexadecimal digits after '\U', `+ - "but got %q instead", lx.current()) + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) } } return lx.pop() } -// lexNumberOrDateStart consumes either an integer, a float, or datetime. +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. func lexNumberOrDateStart(lx *lexer) stateFn { r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("floats must start with a digit, not '.'") + case '0': + return lexBaseNumberOrDate } - return lx.errorf("expected a digit but got %q", r) + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate } // lexNumberOrDate consumes either an integer, float or datetime. @@ -754,10 +894,10 @@ func lexNumberOrDate(lx *lexer) stateFn { return lexNumberOrDate } switch r { - case '-': + case '-', ':': return lexDatetime case '_': - return lexNumber + return lexDecimalNumber case '.', 'e', 'E': return lexFloat } @@ -775,41 +915,156 @@ func lexDatetime(lx *lexer) stateFn { return lexDatetime } switch r { - case '-', 'T', ':', '.', 'Z', '+': + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': return lexDatetime } lx.backup() - lx.emit(itemDatetime) + lx.emitTrim(itemDatetime) return lx.pop() } -// lexNumberStart consumes either an integer or a float. It assumes that a sign -// has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // We MUST see a digit. Even floats have to start with a digit. +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("floats must start with a digit, not '.'") + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. 
+func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumberStart consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") } - return lexNumber + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) } -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. if isDigit(r) { - return lexNumber + return lexNumberOrDate } switch r { case '_': - return lexNumber + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. + return lexDecimalNumber case '.', 'e', 'E': return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + return lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + return lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHexadecimal(r) { + return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger } lx.backup() @@ -867,49 +1122,31 @@ func lexCommentStart(lx *lexer) stateFn { // It will consume *up to* the first newline character, and pass control // back to the last state on the stack. 
func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() lx.emit(itemText) return lx.pop() + default: + return lexComment } - lx.next() - return lexComment } // lexSkip ignores all slurped input and moves on to the next state. func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. -func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') + lx.ignore() + return nextState } -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "<nil>" + } + return name + "()" } func (itype itemType) String() string { @@ -938,12 +1175,18 @@ func (itype itemType) String() string { return "TableEnd" case itemKeyStart: return "KeyStart" + case itemKeyEnd: + return "KeyEnd" case itemArray: return "Array" case itemArrayEnd: return "ArrayEnd" case itemCommentStart: return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" } panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) } @@ -951,3 +1194,26 @@ func (itype itemType) String() string { func (item item) String() string { return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) } + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/meta.go similarity index 64% rename from vendor/github.com/BurntSushi/toml/decode_meta.go rename to vendor/github.com/BurntSushi/toml/meta.go index b9914a6798c..868619fb975 100644 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -1,33 +1,39 @@ package toml -import "strings" +import ( + "strings" +) -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. 
+// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. type MetaData struct { + context Key // Used only during decoding. + mapping map[string]interface{} types map[string]tomlType keys []Key - decoded map[string]bool - context Key // Used only during decoding. + decoded map[string]struct{} } -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., +// IsDefined reports if the key exists in the TOML data. // -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. // -// IsDefined will return false if an empty key given. Keys are case sensitive. +// Returns false for an empty key. func (md *MetaData) IsDefined(key ...string) bool { if len(key) == 0 { return false } - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) for _, k := range key { if hash, ok = hashOrVal.(map[string]interface{}); !ok { return false @@ -41,58 +47,20 @@ func (md *MetaData) IsDefined(key ...string) bool { // Type returns a string representation of the type of the key specified. // -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { + if typ, ok := md.types[Key(key).String()]; ok { return typ.typeString() } return "" } -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - // Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. // -// The list will have the same order as the keys appeared in the TOML data. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. // // All keys returned are non-empty. func (md *MetaData) Keys() []Key { @@ -113,9 +81,40 @@ func (md *MetaData) Keys() []Key { func (md *MetaData) Undecoded() []Key { undecoded := make([]Key, 0, len(md.keys)) for _, key := range md.keys { - if !md.decoded[key.String()] { + if _, ok := md.decoded[key.String()]; !ok { undecoded = append(undecoded, key) } } return undecoded } + +// Key represents any TOML key, including key groups. Use (MetaData).Keys to get +// values of this type. 
+type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 50869ef9266..8269cca1701 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -5,54 +5,63 @@ import ( "strconv" "strings" "time" - "unicode" "unicode/utf8" + + "github.com/BurntSushi/toml/internal" ) type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. - implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + + ordered []Key // List of keys in the order that they appear in the TOML data. + mapping map[string]interface{} // Map keyname → key value. + types map[string]tomlType // Map keyname → TOML type. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } func parse(data string) (p *parser, err error) { defer func() { if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr return } panic(r) } }() + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. 
+ ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + p = &parser{ mapping: make(map[string]interface{}), types: make(map[string]tomlType), lx: lex(data), ordered: make([]Key, 0), - implicits: make(map[string]bool), + implicits: make(map[string]struct{}), } for { item := p.next() @@ -65,20 +74,48 @@ func parse(data string) (p *parser, err error) { return p, nil } +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + }) +} + func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) } func (p *parser) next() item { it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) if it.typ == itemError { - p.panicf("%s", it.val) + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) } return it } +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + func (p *parser) bug(format string, v ...interface{}) { panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } @@ -97,44 +134,59 @@ func (p *parser) assertEqual(expected, got itemType) { func (p *parser) topLevel(item item) { switch item.typ { - case itemCommentStart: - p.approxLine = item.line + case itemCommentStart: // # .. p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line + case itemTableStart: // [ .. ] + name := p.nextPos() var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) } - p.assertEqual(itemTableEnd, kg.typ) + p.assertEqual(itemTableEnd, name.typ) - p.establishContext(key, false) + p.addContext(key, false) p.setType("", tomlHash) p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) } - p.assertEqual(itemArrayTableEnd, kg.typ) + p.assertEqual(itemArrayTableEnd, name.typ) - p.establishContext(key, true) + p.addContext(key, true) p.setType("", tomlArrayHash) p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 
'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext p.currentKey = "" default: p.bug("Unexpected type at top level: %s", item.typ) @@ -148,180 +200,262 @@ func (p *parser) keyString(it item) string { return it.val case itemString, itemMultilineString, itemRawString, itemRawMultilineString: - s, _ := p.value(it) + s, _ := p.value(it, false) return s.(string) default: p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") } + panic("unreachable") } +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. -func (p *parser) value(it item) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { switch it.typ { case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) case itemBool: switch it.val { case "true": return true, p.typeOfPrimitive(it) case "false": return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. 
- if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. 
+ p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray) + + // p.setType(p.currentKey, typ) + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue } - return array, p.typeOfArray(types) - case itemInlineTableStart: - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - p.context = append(p.context, p.currentKey) - p.currentKey = "" - for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ != itemKeyStart { - p.bug("Expected key start but instead found %q, around line %d", - it.val, p.approxLine) - } - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } + /// Read all key parts. 
+		k := p.nextPos()
+		var key Key
+		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+			key = append(key, p.keyString(k))
+		}
+		p.assertEqual(itemKeyEnd, k.typ)

-			// retrieve key
-			k := p.next()
-			p.approxLine = k.line
-			kname := p.keyString(k)
+		/// The current key is the last part.
+		p.currentKey = key[len(key)-1]

-			// retrieve value
-			p.currentKey = kname
-			val, typ := p.value(p.next())
-			// make sure we keep metadata up to date
-			p.setType(kname, typ)
-			p.ordered = append(p.ordered, p.context.add(p.currentKey))
-			hash[kname] = val
+		/// All the other parts (if any) are the context; need to set each part
+		/// as implicit.
+		context := key[:len(key)-1]
+		for i := range context {
+			p.addImplicitContext(append(p.context, context[i:i+1]...))
 		}
-		p.context = outerContext
-		p.currentKey = outerKey
-		return hash, tomlHash
+
+		/// Set the value.
+		val, typ := p.value(p.next(), false)
+		p.set(p.currentKey, val, typ)
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+		hash[p.currentKey] = val
+
+		/// Restore context.
+		p.context = prevContext
 	}
-	p.bug("Unexpected value type: %s", it.typ)
-	panic("unreachable")
+	p.context = outerContext
+	p.currentKey = outerKey
+	return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+	if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
+		return true
+	}
+	if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+		return true
+	}
+	return false
 }
 
 // numUnderscoresOK checks whether each underscore in s is surrounded by
 // characters that are not underscores.
 func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
 	accept := false
 	for _, r := range s {
 		if r == '_' {
 			if !accept {
 				return false
 			}
-			accept = false
-			continue
 		}
-		accept = true
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
 	}
 	return accept
 }
@@ -338,13 +472,12 @@ func numPeriodsOK(s string) bool {
 	return !period
 }
 
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
 //
 // Establishing the context also makes sure that the key isn't a duplicate, and
 // will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
+func (p *parser) addContext(key Key, array bool) {
 	var ok bool
 
 	// Always start at the top level and drill down for our context.
@@ -383,7 +516,7 @@ func (p *parser) establishContext(key Key, array bool) {
 		// list of tables for it.
 		k := key[len(key)-1]
 		if _, ok := hashContext[k]; !ok {
-			hashContext[k] = make([]map[string]interface{}, 0, 5)
+			hashContext[k] = make([]map[string]interface{}, 0, 4)
 		}
 
 		// Add a new table. But make sure the key hasn't already been used
@@ -391,8 +524,7 @@ func (p *parser) establishContext(key Key, array bool) {
 		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
 			hashContext[k] = append(hash, make(map[string]interface{}))
 		} else {
-			p.panicf("Key '%s' was already created and cannot be used as "+
-				"an array.", keyContext)
+			p.panicf("Key '%s' was already created and cannot be used as an array.", key)
 		}
 	} else {
 		p.setValue(key[len(key)-1], make(map[string]interface{}))
@@ -400,15 +532,22 @@ func (p *parser) establishContext(key Key, array bool) {
 	p.context = append(p.context, key[len(key)-1])
 }
 
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+	p.setValue(key, val)
+	p.setType(key, typ)
+}
+
 // setValue sets the given key to the given value in the current context.
 // It will make sure that the key hasn't already been defined and account for
 // implicit key groups.
 func (p *parser) setValue(key string, value interface{}) {
-	var tmpHash interface{}
-	var ok bool
-
-	hash := p.mapping
-	keyContext := make(Key, 0)
+	var (
+		tmpHash    interface{}
+		ok         bool
+		hash       = p.mapping
+		keyContext Key
+	)
 	for _, k := range p.context {
 		keyContext = append(keyContext, k)
 		if tmpHash, ok = hash[k]; !ok {
@@ -422,24 +561,26 @@ func (p *parser) setValue(key string, value interface{}) {
 		case map[string]interface{}:
 			hash = t
 		default:
-			p.bug("Expected hash to have type 'map[string]interface{}', but "+
-				"it has '%T' instead.", tmpHash)
+			p.panicf("Key '%s' has already been defined.", keyContext)
 		}
 	}
 	keyContext = append(keyContext, key)
 
 	if _, ok := hash[key]; ok {
-		// Typically, if the given key has already been set, then we have
-		// to raise an error since duplicate keys are disallowed. However,
-		// it's possible that a key was previously defined implicitly. In this
-		// case, it is allowed to be redefined concretely. (See the
-		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+		// Normally redefining keys isn't allowed, but the key could have been
+		// defined implicitly and it's allowed to be redefined concretely. (See
+		// the `valid/implicit-and-explicit-after.toml` in toml-test)
 		//
 		// But we have to make sure to stop marking it as an implicit. (So that
 		// another redefinition provokes an error.)
 		//
 		// Note that since it has already been defined (as a hash), we don't
 		// want to overwrite it. So our business is done.
+		if p.isArray(keyContext) {
+			p.removeImplicit(keyContext)
+			hash[key] = value
+			return
+		}
 		if p.isImplicit(keyContext) {
 			p.removeImplicit(keyContext)
 			return
@@ -449,40 +590,39 @@ func (p *parser) setValue(key string, value interface{}) {
 		// key, which is *always* wrong.
 		p.panicf("Key '%s' has already been defined.", keyContext)
 	}
+
 	hash[key] = value
 }
 
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
+// setType sets the type of a particular value at a given key. It should be
+// called immediately AFTER setValue.
 //
 // Note that if `key` is empty, then the type given will be applied to the
 // current context (which is either a table or an array of tables).
 func (p *parser) setType(key string, typ tomlType) {
 	keyContext := make(Key, 0, len(p.context)+1)
-	for _, k := range p.context {
-		keyContext = append(keyContext, k)
-	}
+	keyContext = append(keyContext, p.context...)
 	if len(key) > 0 { // allow type setting for hashes
 		keyContext = append(keyContext, key)
 	}
+	// Special case to make empty keys ("" = 1) work.
+	// Without it, it will set "" rather than `""`.
+	// TODO: why is this needed? And why is this only needed here?
+	if len(keyContext) == 0 {
+		keyContext = Key{""}
+	}
 	p.types[keyContext.String()] = typ
 }
 
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
-	p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
-	p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
-	return p.implicits[key.String()]
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key)  { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool    { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+	p.addImplicit(key)
+	p.addContext(key, false)
 }
 
 // current returns the full key name of the current context.
@@ -497,24 +637,58 @@ func (p *parser) current() string {
 }
 
 func stripFirstNewline(s string) string {
-	if len(s) == 0 || s[0] != '\n' {
-		return s
+	if len(s) > 0 && s[0] == '\n' {
+		return s[1:]
 	}
-	return s[1:]
+	if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+		return s[2:]
+	}
+	return s
 }
 
-func stripEscapedWhitespace(s string) string {
-	esc := strings.Split(s, "\\\n")
-	if len(esc) > 1 {
-		for i := 1; i < len(esc); i++ {
-			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func stripEscapedNewlines(s string) string {
+	split := strings.Split(s, "\n")
+	if len(split) < 1 {
+		return s
+	}
+
+	escNL := false // Keep track of whether the last non-blank line was escaped.
+	for i, line := range split {
+		line = strings.TrimRight(line, " \t\r")
+
+		if len(line) == 0 || line[len(line)-1] != '\\' {
+			split[i] = strings.TrimRight(split[i], "\r")
+			if !escNL && i != len(split)-1 {
+				split[i] += "\n"
+			}
+			continue
+		}
+
+		escBS := true
+		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+			escBS = !escBS
+		}
+		if escNL {
+			line = strings.TrimLeft(line, " \t\r")
+		}
+		escNL = !escBS
+
+		if escBS {
+			split[i] += "\n"
+			continue
+		}
+
+		split[i] = line[:len(line)-1] // Remove \
+		if len(split)-1 > i {
+			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
 		}
 	}
-	return strings.Join(esc, "")
+	return strings.Join(split, "")
 }
 
-func (p *parser) replaceEscapes(str string) string {
-	var replaced []rune
+func (p *parser) replaceEscapes(it item, str string) string {
+	replaced := make([]rune, 0, len(str))
 	s := []byte(str)
 	r := 0
 	for r < len(s) {
@@ -533,6 +707,9 @@ func (p *parser) replaceEscapes(str string) string {
 		default:
 			p.bug("Expected valid escape code after \\, but got %q.", s[r])
 			return ""
+		case ' ', '\t':
+			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+			return ""
 		case 'b':
 			replaced = append(replaced, rune(0x0008))
 			r += 1
@@ -558,14 +735,14 @@ func (p *parser) replaceEscapes(str string) string {
 			// At this point, we know we have a Unicode escape of the form
 			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
 			// for us.)
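+			// For example, `\u00E9` decodes to U+00E9 ('é').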
-			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
 			replaced = append(replaced, escaped)
 			r += 5
 		case 'U':
 			// At this point, we know we have a Unicode escape of the form
 			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
 			// for us.)
-			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
 			replaced = append(replaced, escaped)
 			r += 9
 		}
@@ -573,20 +750,14 @@ func (p *parser) replaceEscapes(str string) string {
 	return string(replaced)
 }
 
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
 	s := string(bs)
 	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
 	if err != nil {
-		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
-			"lexer claims it's OK: %s", s, err)
+		p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
 	}
 	if !utf8.ValidRune(rune(hex)) {
-		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+		p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
 	}
 	return rune(hex)
 }
-
-func isStringType(ty itemType) bool {
-	return ty == itemString || ty == itemMultilineString ||
-		ty == itemRawString || ty == itemRawMultilineString
-}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164be060..00000000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 608997c22f6..254ca82e549 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
 	next := []field{{typ: t}}
 
 	// Count of queued names for current level and the next.
-	count := map[reflect.Type]int{}
-	nextCount := map[reflect.Type]int{}
+	var count map[reflect.Type]int
+	var nextCount map[reflect.Type]int
 
 	// Types already visited at an earlier level.
 	visited := map[reflect.Type]bool{}
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_toml.go
similarity index 75%
rename from vendor/github.com/BurntSushi/toml/type_check.go
rename to vendor/github.com/BurntSushi/toml/type_toml.go
index c73f8afc1a6..4e90d77373b 100644
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
 	return t1.typeString() == t2.typeString()
 }
 
-func typeIsHash(t tomlType) bool {
+func typeIsTable(t tomlType) bool {
 	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
 }
 
@@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
 	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
 	panic("unreachable")
 }
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
-	// Empty arrays are cool.
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md index 5680010575b..683be1dcf9c 100644 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -1,4 +1,4 @@ -# go-winio +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) This repository contains utilities for efficiently performing Win32 IO operations in Go. Currently, this is focused on accessing named pipes and other file handles, and @@ -11,12 +11,27 @@ package. Please see the LICENSE file for licensing information. -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. +## Contributing +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go new file mode 100644 index 00000000000..d39eccf0238 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go @@ -0,0 +1,4 @@ +// +build !windows +// This file only exists to allow go get on non-Windows platforms. 
+ +package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go new file mode 100644 index 00000000000..34160966399 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go @@ -0,0 +1,68 @@ +package backuptar + +import ( + "archive/tar" + "fmt" + "strconv" + "strings" + "time" +) + +// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go +// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually. +// Idea taken from containerd which did the same thing. + +// parsePAXTime takes a string of the form %d.%d as described in the PAX +// specification. Note that this implementation allows for negative timestamps, +// which is allowed for by the PAX specification, but not always portable. +func parsePAXTime(s string) (time.Time, error) { + const maxNanoSecondDigits = 9 + + // Split string into seconds and sub-seconds parts. + ss, sn := s, "" + if pos := strings.IndexByte(s, '.'); pos >= 0 { + ss, sn = s[:pos], s[pos+1:] + } + + // Parse the seconds. + secs, err := strconv.ParseInt(ss, 10, 64) + if err != nil { + return time.Time{}, tar.ErrHeader + } + if len(sn) == 0 { + return time.Unix(secs, 0), nil // No sub-second values + } + + // Parse the nanoseconds. + if strings.Trim(sn, "0123456789") != "" { + return time.Time{}, tar.ErrHeader + } + if len(sn) < maxNanoSecondDigits { + sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad + } else { + sn = sn[:maxNanoSecondDigits] // Right truncate + } + nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed + if len(ss) > 0 && ss[0] == '-' { + return time.Unix(secs, -1*nsecs), nil // Negative correction + } + return time.Unix(secs, nsecs), nil +} + +// formatPAXTime converts ts into a time of the form %d.%d as described in the +// PAX specification. This function is capable of negative timestamps. +func formatPAXTime(ts time.Time) (s string) { + secs, nsecs := ts.Unix(), ts.Nanosecond() + if nsecs == 0 { + return strconv.FormatInt(secs, 10) + } + + // If seconds is negative, then perform correction. + sign := "" + if secs < 0 { + sign = "-" // Remember sign + secs = -(secs + 1) // Add a second to secs + nsecs = -(nsecs - 1e9) // Take that second away from nsecs + } + return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go new file mode 100644 index 00000000000..689e4da6bda --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -0,0 +1,480 @@ +// +build windows + +package backuptar + +import ( + "archive/tar" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +const ( + hdrFileAttributes = "MSWINDOWS.fileattr" + hdrSecurityDescriptor = "MSWINDOWS.sd" + hdrRawSecurityDescriptor = "MSWINDOWS.rawsd" + hdrMountPoint = "MSWINDOWS.mountpoint" + hdrEaPrefix = "MSWINDOWS.xattr." 
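+
+	// The MSWINDOWS.* records above are written by WriteTarFileFromBackupStream
+	// and read back by FileInfoFromHeader and WriteBackupStreamFromTarFile below.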
+ + hdrCreationTime = "LIBARCHIVE.creationtime" +) + +// zeroReader is an io.Reader that always returns 0s. +type zeroReader struct{} + +func (zr zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { + curOffset := int64(0) + for { + bhdr, err := br.Next() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if bhdr.Id != winio.BackupSparseBlock { + return fmt.Errorf("unexpected stream %d", bhdr.Id) + } + + // We can't seek backwards, since we have already written that data to the tar.Writer. + if bhdr.Offset < curOffset { + return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset) + } + // archive/tar does not support writing sparse files + // so just write zeroes to catch up to the current offset. + if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { + return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err) + } + if bhdr.Size == 0 { + // A sparse block with size = 0 is used to mark the end of the sparse blocks. + break + } + n, err := io.Copy(t, br) + if err != nil { + return err + } + if n != bhdr.Size { + return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset) + } + curOffset = bhdr.Offset + n + } + return nil +} + +// BasicInfoHeader creates a tar header from basic file information. +func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { + hdr := &tar.Header{ + Format: tar.FormatPAX, + Name: filepath.ToSlash(name), + Size: size, + Typeflag: tar.TypeReg, + ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), + ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), + AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), + PAXRecords: make(map[string]string), + } + hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) + hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + hdr.Mode |= c_ISDIR + hdr.Size = 0 + hdr.Typeflag = tar.TypeDir + } + return hdr +} + +// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. +// +// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. +// +// The additional Win32 metadata is: +// +// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value +// +// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format +// +// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) +func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { + name = filepath.ToSlash(name) + hdr := BasicInfoHeader(name, size, fileInfo) + + // If r can be seeked, then this function is two-pass: pass 1 collects the + // tar header data, and pass 2 copies the data stream. If r cannot be + // seeked, then some header data (in particular EAs) will be silently lost. 
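+	// Callers that want EA metadata preserved should therefore pass an
+	// io.ReadSeeker (e.g. an *os.File) rather than a plain io.Reader.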
+	var (
+		restartPos int64
+		err        error
+	)
+	sr, readTwice := r.(io.Seeker)
+	if readTwice {
+		if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
+			readTwice = false
+		}
+	}
+
+	br := winio.NewBackupStreamReader(r)
+	var dataHdr *winio.BackupHeader
+	for dataHdr == nil {
+		bhdr, err := br.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		switch bhdr.Id {
+		case winio.BackupData:
+			hdr.Mode |= c_ISREG
+			if !readTwice {
+				dataHdr = bhdr
+			}
+		case winio.BackupSecurity:
+			sd, err := ioutil.ReadAll(br)
+			if err != nil {
+				return err
+			}
+			hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
+
+		case winio.BackupReparseData:
+			hdr.Mode |= c_ISLNK
+			hdr.Typeflag = tar.TypeSymlink
+			reparseBuffer, err := ioutil.ReadAll(br)
+			if err != nil {
+				return err
+			}
+			rp, err := winio.DecodeReparsePoint(reparseBuffer)
+			if err != nil {
+				return err
+			}
+			if rp.IsMountPoint {
+				hdr.PAXRecords[hdrMountPoint] = "1"
+			}
+			hdr.Linkname = rp.Target
+
+		case winio.BackupEaData:
+			eab, err := ioutil.ReadAll(br)
+			if err != nil {
+				return err
+			}
+			eas, err := winio.DecodeExtendedAttributes(eab)
+			if err != nil {
+				return err
+			}
+			for _, ea := range eas {
+				// Use base64 encoding for the binary value. Note that there
+				// is no way to encode the EA's flags, since their use doesn't
+				// make any sense for persisted EAs.
+				hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
+			}
+
+		case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+			// ignore these streams
+		default:
+			return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
+		}
+	}
+
+	err = t.WriteHeader(hdr)
+	if err != nil {
+		return err
+	}
+
+	if readTwice {
+		// Get back to the data stream.
+		if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
+			return err
+		}
+		for dataHdr == nil {
+			bhdr, err := br.Next()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return err
+			}
+			if bhdr.Id == winio.BackupData {
+				dataHdr = bhdr
+			}
+		}
+	}
+
+	// The logic for copying file contents is fairly complicated due to the need for handling sparse files,
+	// and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
+	// with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
+	// be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
+	// files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
+	// in the list at the bottom of this block comment.
+	//
+	// Sparse files can be represented in four different ways, based on the specifics of the file.
+	// - Size = 0:
+	//     Previously: BackupRead yields no data stream and no sparse block streams.
+	//     Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
+	// - Size > 0, no allocated ranges:
+	//     BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
+	//     size = 0 and offset = <file size>.
+	// - Size > 0, one allocated range:
+	//     BackupRead yields a data stream with size = <file size> containing the file contents. There are no
+	//     sparse block streams. This is the case if you take a normal file with contents and simply set the
+	//     sparse flag on it.
+	// - Size > 0, multiple allocated ranges:
+	//     BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
+	//     range of the file containing the range contents. Finally there is a sparse block stream with
+	//     size = 0 and offset = <file size>.
+
+	if dataHdr != nil {
+		// A data stream was found. Copy the data.
+		// We assume that we will either have a data stream size > 0 XOR have sparse block streams.
+		if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
+			if size != dataHdr.Size {
+				return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
+			}
+			if _, err = io.Copy(t, br); err != nil {
+				return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
+			}
+		} else if size > 0 {
+			// As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
+			// These files have no sparse block streams, so skip the copySparse call if file size = 0.
+			if err = copySparse(t, br); err != nil {
+				return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
+			}
+		}
+	}
+
+	// Look for streams after the data stream. The only ones we handle are alternate data streams.
+	// Other streams may have metadata that could be serialized, but the tar header has already
+	// been written. In practice, this means that we don't get EA or TXF metadata.
+	for {
+		bhdr, err := br.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		switch bhdr.Id {
+		case winio.BackupAlternateData:
+			altName := bhdr.Name
+			if strings.HasSuffix(altName, ":$DATA") {
+				altName = altName[:len(altName)-len(":$DATA")]
+			}
+			if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
+				hdr = &tar.Header{
+					Format:     hdr.Format,
+					Name:       name + altName,
+					Mode:       hdr.Mode,
+					Typeflag:   tar.TypeReg,
+					Size:       bhdr.Size,
+					ModTime:    hdr.ModTime,
+					AccessTime: hdr.AccessTime,
+					ChangeTime: hdr.ChangeTime,
+				}
+				err = t.WriteHeader(hdr)
+				if err != nil {
+					return err
+				}
+				_, err = io.Copy(t, br)
+				if err != nil {
+					return err
+				}
+
+			} else {
+				// Unsupported for now, since the size of the alternate stream is not present
+				// in the backup stream until after the data has been read.
+				return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
+			}
+		case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+			// ignore these streams
+		default:
+			return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
+		}
+	}
+	return nil
+}
+
+// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
+// WriteTarFileFromBackupStream.
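+// CreationTime falls back to the entry's ModTime when no LIBARCHIVE.creationtime
+// PAX record is present.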
+func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+	name = hdr.Name
+	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+		size = hdr.Size
+	}
+	fileInfo = &winio.FileBasicInfo{
+		LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
+		LastWriteTime:  windows.NsecToFiletime(hdr.ModTime.UnixNano()),
+		ChangeTime:     windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
+		// Default to ModTime, we'll pull hdrCreationTime below if present
+		CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
+	}
+	if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
+		attr, err := strconv.ParseUint(attrStr, 10, 32)
+		if err != nil {
+			return "", 0, nil, err
+		}
+		fileInfo.FileAttributes = uint32(attr)
+	} else {
+		if hdr.Typeflag == tar.TypeDir {
+			fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+		}
+	}
+	if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
+		creationTime, err := parsePAXTime(creationTimeStr)
+		if err != nil {
+			return "", 0, nil, err
+		}
+		fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
+	}
+	return
+}
+
+// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
+// tar file entries in order to collect all the alternate data streams for the file, it returns the next
+// tar file that was not processed, or io.EOF if there are no more.
+func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
+	bw := winio.NewBackupStreamWriter(w)
+	var sd []byte
+	var err error
+	// Maintaining old SDDL-based behavior for backward compatibility.  All new tar headers written
+	// by this library will have raw binary for the security descriptor.
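+	// When both records are present, the raw descriptor wins: it is decoded
+	// second and overwrites the SDDL-derived value of sd.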
+ if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + if len(sd) != 0 { + bhdr := winio.BackupHeader{ + Id: winio.BackupSecurity, + Size: int64(len(sd)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(sd) + if err != nil { + return nil, err + } + } + var eas []winio.ExtendedAttribute + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err := winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + bhdr := winio.BackupHeader{ + Id: winio.BackupEaData, + Size: int64(len(eadata)), + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(eadata) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeSymlink { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + reparse := winio.EncodeReparsePoint(&rp) + bhdr := winio.BackupHeader{ + Id: winio.BackupReparseData, + Size: int64(len(reparse)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(reparse) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + bhdr := winio.BackupHeader{ + Id: winio.BackupData, + Size: hdr.Size, + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } + // Copy all the alternate data streams and return the next non-ADS header. 
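+	// A regular entry named "<parent>:<stream>" is treated as an alternate data
+	// stream of the preceding file; the first entry that doesn't match this
+	// pattern is returned to the caller.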
+ for { + ahdr, err := t.Next() + if err != nil { + return nil, err + } + if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { + return ahdr, nil + } + bhdr := winio.BackupHeader{ + Id: winio.BackupAlternateData, + Size: ahdr.Size, + Name: ahdr.Name[len(hdr.Name):] + ":$DATA", + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go new file mode 100644 index 00000000000..fca241590cc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -0,0 +1,161 @@ +// +build windows + +package security + +import ( + "os" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +type ( + accessMask uint32 + accessMode uint32 + desiredAccess uint32 + inheritMode uint32 + objectType uint32 + shareMode uint32 + securityInformation uint32 + trusteeForm uint32 + trusteeType uint32 + + explicitAccess struct { + accessPermissions accessMask + accessMode accessMode + inheritance inheritMode + trustee trustee + } + + trustee struct { + multipleTrustee *trustee + multipleTrusteeOperation int32 + trusteeForm trusteeForm + trusteeType trusteeType + name uintptr + } +) + +const ( + accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ + + accessModeGrant accessMode = 1 + + desiredAccessReadControl desiredAccess = 0x20000 + desiredAccessWriteDac desiredAccess = 0x40000 + + gvmga = "GrantVmGroupAccess:" + + inheritModeNoInheritance inheritMode = 0x0 + inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 + + objectTypeFileObject objectType = 0x1 + + securityInformationDACL securityInformation = 0x4 + + shareModeRead shareMode = 0x1 + shareModeWrite shareMode = 0x2 + + sidVmGroup = "S-1-5-83-0" + + trusteeFormIsSid trusteeForm = 0 + + trusteeTypeWellKnownGroup trusteeType = 5 +) + +// GrantVMGroupAccess sets the DACL for a specified file or directory to +// include Grant ACE entries for the VM Group SID. This is a golang re- +// implementation of the same function in vmcompute, just not exported in +// RS5. Which kind of sucks. Sucks a lot :/ +func GrantVmGroupAccess(name string) error { + // Stat (to determine if `name` is a directory). + s, err := os.Stat(name) + if err != nil { + return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) + } + + // Get a handle to the file/directory. Must defer Close on success. + fd, err := createFile(name, s.IsDir()) + if err != nil { + return err // Already wrapped + } + defer syscall.CloseHandle(fd) + + // Get the current DACL and Security Descriptor. Must defer LocalFree on success. + ot := objectTypeFileObject + si := securityInformationDACL + sd := uintptr(0) + origDACL := uintptr(0) + if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { + return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + + // Generate a new DACL which is the current DACL with the required ACEs added. + // Must defer LocalFree on success. + newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) + if err != nil { + return err // Already wrapped + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + + // And finally use SetSecurityInfo to apply the updated DACL. 
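+	// securityInformationDACL scopes the update to the DACL only; the owner,
+	// group, and SACL of the file are left untouched.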
+ if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { + return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) + } + + return nil +} + +// createFile is a helper function to call [Nt]CreateFile to get a handle to +// the file or directory. +func createFile(name string, isDir bool) (syscall.Handle, error) { + namep := syscall.StringToUTF16(name) + da := uint32(desiredAccessReadControl | desiredAccessWriteDac) + sm := uint32(shareModeRead | shareModeWrite) + fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + if isDir { + fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + } + fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) + if err != nil { + return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) + } + return fd, nil +} + +// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. +// The caller is responsible for LocalFree of the returned DACL on success. +func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { + // Generate pointers to the SIDs based on the string SIDs + sid, err := syscall.StringToSid(sidVmGroup) + if err != nil { + return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + } + + inheritance := inheritModeNoInheritance + if isDir { + inheritance = inheritModeSubContainersAndObjectsInherit + } + + eaArray := []explicitAccess{ + explicitAccess{ + accessPermissions: accessMaskDesiredPermission, + accessMode: accessModeGrant, + inheritance: inheritance, + trustee: trustee{ + trusteeForm: trusteeFormIsSid, + trusteeType: trusteeTypeWellKnownGroup, + name: uintptr(unsafe.Pointer(sid)), + }, + }, + } + + modifiedDACL := uintptr(0) + if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { + return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + } + + return modifiedDACL, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go new file mode 100644 index 00000000000..d7096716ce2 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -0,0 +1,7 @@ +package security + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go + +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go new file mode 100644 index 00000000000..4084680e0f0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -0,0 +1,70 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package security + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. 
+const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") +) + +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 9c83d36fe53..c3dd7c21769 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -28,8 +28,9 @@ const ( ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" ) const ( diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 00000000000..a33a36c0ffb --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,323 @@ +// +build windows + +package vhd + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go + +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk 
+//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath + +type ( + CreateVirtualDiskFlag uint32 + VirtualDiskFlag uint32 + AttachVirtualDiskFlag uint32 + DetachVirtualDiskFlag uint32 + VirtualDiskAccessMask uint32 +) + +type VirtualStorageType struct { + DeviceID uint32 + VendorID guid.GUID +} + +type CreateVersion2 struct { + UniqueID guid.GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + PhysicalSectorSizeInByte uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType VirtualStorageType + SourceVirtualStorageType VirtualStorageType + ResiliencyGUID guid.GUID +} + +type CreateVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 CreateVersion2 +} + +type OpenVersion2 struct { + GetInfoOnly bool + ReadOnly bool + ResiliencyGUID guid.GUID +} + +type OpenVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 OpenVersion2 +} + +type AttachVersion2 struct { + RestrictedOffset uint64 + RestrictedLength uint64 +} + +type AttachVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 AttachVersion2 +} + +const ( + VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 + + // Access Mask for opening a VHD + VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 + VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 + VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 + VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 + VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 + VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 + VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 + VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 + VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 + VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 + + // Flags for creating a VHD + CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 + CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 + CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 + CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 + CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 + CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 + CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 + + // Flags for opening a VHD + OpenVirtualDiskFlagNone 
VirtualDiskFlag = 0x00000000 + OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 + OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 + OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 + OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 + OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 + OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 + OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 + OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 + OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 + OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 + + // Flags for attaching a VHD + AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 + AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 + AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 + AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 + AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 + AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 + AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 + AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 + AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 + AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 + AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 + + // Flags for detaching a VHD + DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 +) + +// CreateVhdx is a helper function to create a simple vhdx file at the given path using +// default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + params := CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) + if err != nil { + return err + } + + if err := syscall.CloseHandle(handle); err != nil { + return err + } + return nil +} + +// DetachVirtualDisk detaches a virtual hard disk by handle. +func DetachVirtualDisk(handle syscall.Handle) (err error) { + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return errors.Wrap(err, "failed to detach virtual disk") + } + return nil +} + +// DetachVhd detaches a vhd found at `path`. +func DetachVhd(path string) error { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + return DetachVirtualDisk(handle) +} + +// AttachVirtualDisk attaches a virtual hard disk for use. +func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { + // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. + if err := attachVirtualDisk( + handle, + nil, + uint32(attachVirtualDiskFlag), + 0, + parameters, + nil, + ); err != nil { + return errors.Wrap(err, "failed to attach virtual disk") + } + return nil +} + +// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 +// of the ATTACH_VIRTUAL_DISK_PARAMETERS. 
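+//
+// A minimal usage sketch (the path below is illustrative only):
+//
+//	if err := vhd.AttachVhd(`C:\images\scratch.vhdx`); err != nil {
+//		log.Fatal(err)
+//	}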
+func AttachVhd(path string) (err error) { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + + defer syscall.CloseHandle(handle) + params := AttachVirtualDiskParameters{Version: 2} + if err := AttachVirtualDisk( + handle, + AttachVirtualDiskFlagNone, + ¶ms, + ); err != nil { + return errors.Wrap(err, "failed to attach virtual disk") + } + return nil +} + +// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. +func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { + parameters := OpenVirtualDiskParameters{Version: 2} + handle, err := OpenVirtualDiskWithParameters( + vhdPath, + virtualDiskAccessMask, + openVirtualDiskFlags, + ¶meters, + ) + if err != nil { + return 0, err + } + return handle, nil +} + +// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. +func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + if err := openVirtualDisk( + &defaultType, + vhdPath, + uint32(virtualDiskAccessMask), + uint32(openVirtualDiskFlags), + parameters, + &handle, + ); err != nil { + return 0, errors.Wrap(err, "failed to open virtual disk") + } + return handle, nil +} + +// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. +func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + + if err := createVirtualDisk( + &defaultType, + path, + uint32(virtualDiskAccessMask), + nil, + uint32(createVirtualDiskFlags), + 0, + parameters, + nil, + &handle, + ); err != nil { + return handle, errors.Wrap(err, "failed to create virtual disk") + } + return handle, nil +} + +// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical +// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer +// that represents the particular enumeration of the physical disk on the caller's system. +func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { + var ( + diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars + diskPhysicalPathBuf [256]uint16 + ) + if err := getVirtualDiskPhysicalPath( + handle, + &diskPathSizeInBytes, + &diskPhysicalPathBuf[0], + ); err != nil { + return "", errors.Wrap(err, "failed to get disk physical path") + } + return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil +} + +// CreateDiffVhd is a helper function to create a differencing virtual disk. +func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { + // Setting `ParentPath` is how to signal to create a differencing disk. 
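+	// The new disk records baseVhdPath as its parent and stores only the
+	// blocks that diverge from it.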
+ createParams := &CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + ParentPath: windows.StringToUTF16Ptr(baseVhdPath), + BlockSizeInBytes: blockSizeInMB * 1024 * 1024, + OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), + }, + } + + vhdHandle, err := CreateVirtualDisk( + diffVhdPath, + VirtualDiskAccessNone, + CreateVirtualDiskFlagNone, + createParams, + ) + if err != nil { + return fmt.Errorf("failed to create differencing vhd: %s", err) + } + if err := syscall.CloseHandle(vhdHandle); err != nil { + return fmt.Errorf("failed to close differencing vhd handle: %s", err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go new file mode 100644 index 00000000000..7fb5f3651b9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -0,0 +1,106 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") + procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") + procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") +) + +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { + r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) +} + +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle 
*syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes new file mode 100644 index 00000000000..94f480de94e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore new file mode 100644 index 00000000000..54ed6f06c9d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -0,0 +1,38 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Ignore vscode setting files +.vscode/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Ignore gcs bin directory +service/bin/ +service/pkg/ + +*.img +*.vhd +*.tar.gz + +# Make stuff +.rootfs-done +bin/* +rootfs/* +*.o +/build/ + +deps/* +out/* + +.idea/ +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 00000000000..2400e7f1e02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,99 @@ +run: + 
timeout: 8m + +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. + exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS new file mode 100644 index 00000000000..f4c5a07d14b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS @@ -0,0 +1 @@ +* @microsoft/containerplat \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 00000000000..49d21669aee --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile new file mode 100644 index 00000000000..a8f5516cd0b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -0,0 +1,87 @@ +BASE:=base.tar.gz + +GO:=go +GO_FLAGS:=-ldflags "-s -w" # strip Go binaries +CGO_ENABLED:=0 +GOMODVENDOR:= + +CFLAGS:=-O2 -Wall +LDFLAGS:=-static -s # strip C binaries + +GO_FLAGS_EXTRA:= +ifeq "$(GOMODVENDOR)" "1" +GO_FLAGS_EXTRA += -mod=vendor +endif +GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) + +SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) + +# The link aliases for gcstools +GCS_TOOLS=\ + generichook + +.PHONY: all always rootfs test + +all: out/initrd.img out/rootfs.tar.gz + +clean: + find -name '*.o' -print0 | xargs -0 -r rm + rm -rf bin deps rootfs out + +test: + cd $(SRCROOT) && go test -v ./internal/guest/... + +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch + tar -zcf $@ -C rootfs . + rm -rf rootfs + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed + +-include deps/cmd/gcs.gomake +-include deps/cmd/gcstools.gomake + +# Implicit rule for includes that define Go targets. 
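+# Each generated deps/cmd/<name>.gomake fragment defines a bin/<name> target
+# that builds the Go binary with $(GO_BUILD) and records its source
+# dependencies via hack/gomakedeps.sh, so a binary is rebuilt only when the
+# files it actually depends on change.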
+%.gomake: $(SRCROOT)/Makefile + @mkdir -p $(dir $@) + @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new + @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new + @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new + @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new + @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new + @/bin/echo -e '\tmv $$@.new $$@' >> $@.new + @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new + mv $@.new $@ + +VPATH=$(SRCROOT) + +bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +bin/init: init/init.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +%.o: %.c + @mkdir -p $(dir $@) + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml new file mode 100644 index 00000000000..ee18671aa64 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -0,0 +1,49 @@ +version = "unstable" +generator = "gogoctrd" +plugins = ["grpc", "fieldpath"] + +# Control protoc include paths. Below are usually some good defaults, but feel +# free to try it without them if it works for your project. +[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. + before = ["./protobuf"] + + # Paths that should be treated as include roots in relation to the vendor + # directory. These will be calculated with the vendor directory nearest the + # target package. + packages = ["github.com/gogo/protobuf"] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. + after = ["/usr/local/include"] + +# This section maps protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. 
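+# For example, the first mapping below makes a .proto import of
+# "gogoproto/gogo.proto" generate a Go import of github.com/gogo/protobuf/gogoproto.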
+[packages]
+  "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
+  "google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+  "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
+  "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
+  "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1"
+
+[[overrides]]
+prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
+plugins = ["ttrpc"]
+
+[[overrides]]
+prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
+plugins = ["ttrpc"]
+
+[[overrides]]
+prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
+plugins = ["ttrpc"]
+
+[[overrides]]
+prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
+plugins = ["ttrpc"]
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
new file mode 100644
index 00000000000..b8ca926a9da
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -0,0 +1,120 @@
+# hcsshim
+
+[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
+
+This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers, such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers.
+
+It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
+
+## Building
+
+While this repository can be used as a library of sorts to call the HCS APIs, a couple of binaries are also built out of it, chiefly the Linux guest agent and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+
+### Linux Hyper-V Container Guest Agent
+
+To build the Linux guest agent itself, all that's needed is to set GOOS to "linux" and build out of ./cmd/gcs.
+```powershell
+C:\> $env:GOOS="linux"
+C:\> go build .\cmd\gcs\
+```
+
+or on a Linux machine:
+```sh
+> go build ./cmd/gcs
+```
+
+If you want the agent packaged inside a rootfs to boot with, alongside all of the other tools, you'll need to provide a rootfs for it to be packaged into. An easy way is to export the rootfs of a container.
+
+```sh
+docker pull busybox
+docker run --name base_image_container busybox
+docker export base_image_container | gzip > base.tar.gz
+BASE=./base.tar.gz
+make all
+```
+
+If the build is successful, in the `./out` folder you should see:
+```sh
+> ls ./out/
+delta.tar.gz initrd.img rootfs.tar.gz
+```
+
+### Containerd Shim
+
+For more information on the Runtime V2 API, see https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
+
+Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
+
+```powershell
+C:\> $env:GOOS="windows"
+C:\> go build .\cmd\containerd-shim-runhcs-v1
+```
+
+Then place the binary in the directory where Containerd is located in your environment. A default Containerd configuration file can be generated by running:
+```powershell
+.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
+```
+
+This config file will already have the shim set as the default runtime for CRI interactions.
+
+To try the shim out with ctr.exe:
+```powershell
+C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
+certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
+more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
+that all commits in a given PR are signed-off.
+
+### Test Directory (Important to note)
+
+This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this
+project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has
+its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file
+has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project
+(which is the repo itself on your disk).
+
+```
+replace (
+    github.com/Microsoft/hcsshim => ../
+)
+```
+
+Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
+CI in this project will check if the files are out of date and will fail if this is true.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Dependencies
+
+This project requires Golang 1.9 or newer to build.
+
+For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
+
+## Reporting Security Issues
+
+Security issues and bugs should be reported privately, via email, to the Microsoft Security
+Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should
+receive a response within 24 hours. If for some reason you do not, please follow up via
+email to ensure we received your original message. Further information, including the
+[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
+the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
+
+For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet.
+
+---------------
+Copyright (c) 2018 Microsoft Corp. All rights reserved.
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
new file mode 100644
index 00000000000..7f1f2823ddc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
@@ -0,0 +1,38 @@
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// AttachLayerStorageFilter sets up the layer storage filter on a writable
+// container layer.
+//
+// `layerPath` is a path to the directory where the writable layer is mounted. If the
+// path does not end in a `\` the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
+	title := "hcsshim.AttachLayerStorageFilter"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	err = hcsAttachLayerStorageFilter(layerPath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to attach layer storage filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
new file mode 100644
index 00000000000..8e28e6c5046
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
@@ -0,0 +1,26 @@
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DestroyLayer deletes a container layer.
+//
+// `layerPath` is a path to a directory containing the layer to destroy.
+func DestroyLayer(ctx context.Context, layerPath string) (err error) {
+	title := "hcsshim.DestroyLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
+
+	err = hcsDestroyLayer(layerPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to destroy layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
new file mode 100644
index 00000000000..435473257e3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
@@ -0,0 +1,26 @@
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer.
+//
+// `layerPath` is a path to a directory containing the writable layer.
+func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
+	title := "hcsshim.DetachLayerStorageFilter"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
+
+	err = hcsDetachLayerStorageFilter(layerPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to detach layer storage filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
new file mode 100644
index 00000000000..a1b12dd1292
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
@@ -0,0 +1,46 @@
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// ExportLayer exports a container layer.
+//
+// `layerPath` is a path to a directory containing the layer to export.
+//
+// `exportFolderPath` is a pre-existing folder to export the layer to.
+//
+// `layerData` is the parent layer data.
+//
+// `options` are the export options applied to the exported layer.
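+//
+// As a minimal illustrative sketch (the paths and layerData value are
+// hypothetical):
+//
+//	opts := ExportLayerOptions{IsWritableLayer: false}
+//	err := ExportLayer(ctx, `C:\layers\base`, `C:\exported`, layerData, opts)
+//
+// Both `layerData` and `options` are marshaled to JSON internally before the
+// call into computestorage.dll.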
+func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { + title := "hcsshim.ExportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("exportFolderPath", exportFolderPath), + ) + + ldbytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + obytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) + if err != nil { + return errors.Wrap(err, "failed to export layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go new file mode 100644 index 00000000000..83c0fa33f0f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. +// +// If the VHD is not mounted it will be temporarily mounted. +func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { + title := "hcsshim.FormatWritableLayerVhd" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + err = hcsFormatWritableLayerVhd(vhdHandle) + if err != nil { + return errors.Wrap(err, "failed to format writable layer vhd") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go new file mode 100644 index 00000000000..87fee452cd3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go @@ -0,0 +1,193 @@ +package computestorage + +import ( + "context" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio/pkg/security" + "github.com/Microsoft/go-winio/vhd" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const defaultVHDXBlockSizeInMB = 1 + +// SetupContainerBaseLayer is a helper to setup a containers scratch. It +// will create and format the vhdx's inside and the size is configurable with the sizeInGB +// parameter. +// +// `layerPath` is the path to the base container layer on disk. +// +// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. +// +// `diffVhdPath` is the path where the differencing disk for the base layer should be created. +// +// `sizeInGB` is the size in gigabytes to make the base vhdx. +func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + var ( + hivesPath = filepath.Join(layerPath, "Hives") + layoutPath = filepath.Join(layerPath, "Layout") + ) + + // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files + // already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and + // differencing disks if they exist in case we're asking for a different size. 
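+	// The flow for a container base layer is: clear stale artifacts, create and
+	// format the base vhdx, let SetupBaseOSLayer populate it, derive the
+	// differencing disk, and finally grant the VM group access to both disks.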
+ if _, err := os.Stat(hivesPath); err == nil { + if err := os.RemoveAll(hivesPath); err != nil { + return errors.Wrap(err, "failed to remove prexisting hives directory") + } + } + if _, err := os.Stat(layoutPath); err == nil { + if err := os.RemoveAll(layoutPath); err != nil { + return errors.Wrap(err, "failed to remove prexisting layout file") + } + } + + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx path") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { + return err + } + // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + options := OsLayerOptions{ + Type: OsLayerTypeContainer, + } + + // SetupBaseOSLayer expects an empty vhd handle for a container layer and will + // error out otherwise. + if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { + return err + } + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} + +// SetupUtilityVMBaseLayer is a helper to setup a UVMs scratch space. It will create and format +// the vhdx inside and the size is configurable by the sizeInGB parameter. +// +// `uvmPath` is the path to the UtilityVM filesystem. +// +// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. +// +// `diffVhdPath` is the path where the differencing disk for the UVM should be created. +// +// `sizeInGB` specifies the size in gigabytes to make the base vhdx. +func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + // Remove the base and differencing disks if they exist in case we're asking for a different size. + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + // Just create the vhdx for utilityVM layer, no need to format it. 
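+	// Unlike the container path above, the UVM base vhdx is not formatted here;
+	// instead it must be attached before SetupBaseOSLayer is called and is
+	// detached again once the layer has been set up.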
+ createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + // If it is a UtilityVM layer then the base vhdx must be attached when calling + // `SetupBaseOSLayer` + attachParams := &vhd.AttachVirtualDiskParameters{ + Version: 2, + } + if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { + return errors.Wrapf(err, "failed to attach virtual disk") + } + + options := OsLayerOptions{ + Type: OsLayerTypeVM, + } + if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { + return err + } + + // Detach and close the handle after setting up the layer as we don't need the handle + // for anything else and we no longer need to be attached either. + if err = vhd.DetachVirtualDisk(handle); err != nil { + return errors.Wrap(err, "failed to detach vhdx") + } + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go new file mode 100644 index 00000000000..0c61dab3291 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go @@ -0,0 +1,41 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ImportLayer imports a container layer. +// +// `layerPath` is a path to a directory to import the layer to. If the directory +// does not exist it will be automatically created. +// +// `sourceFolderpath` is a pre-existing folder that contains the layer to +// import. +// +// `layerData` is the parent layer data. 
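+//
+// As a minimal illustrative sketch (the paths and layerData value are
+// hypothetical):
+//
+//	err := ImportLayer(ctx, `C:\layers\new`, `C:\unpacked-layer`, layerData)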
+func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { + title := "hcsshim.ImportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("sourceFolderPath", sourceFolderPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to import layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go new file mode 100644 index 00000000000..53ed8ea6eda --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// InitializeWritableLayer initializes a writable layer for a container. +// +// `layerPath` is a path to a directory the layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.InitializeWritableLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + // Options are not used in the platform as of RS5 + err = hcsInitializeWritableLayer(layerPath, string(bytes), "") + if err != nil { + return errors.Wrap(err, "failed to intitialize container layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go new file mode 100644 index 00000000000..fcdbbef8143 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go @@ -0,0 +1,27 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. 
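+//
+// The supplied handle is expected to refer to a VHD that is already attached;
+// HCS allocates the returned path, which is converted to a Go string and
+// freed below via interop.ConvertAndFreeCoTaskMemString.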
+func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { + title := "hcsshim.GetLayerVhdMountPath" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var mountPath *uint16 + err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) + if err != nil { + return "", errors.Wrap(err, "failed to get vhd mount path") + } + path = interop.ConvertAndFreeCoTaskMemString(mountPath) + return path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go new file mode 100644 index 00000000000..06aaf841e8f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go @@ -0,0 +1,74 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// SetupBaseOSLayer sets up a layer that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` +// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type +// == OsLayerTypeVm`. +// +// `options` are the options applied while processing the layer. +func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { + title := "hcsshim.SetupBaseOSLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} + +// SetupBaseOSVolume sets up a volume that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `volumePath` is the path to the volume to be used for setup. +// +// `options` are the options applied while processing the layer. +func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { + if osversion.Build() < 19645 { + return errors.New("SetupBaseOSVolume is not present on builds older than 19645") + } + title := "hcsshim.SetupBaseOSVolume" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("volumePath", volumePath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go new file mode 100644 index 00000000000..95aff9c1848 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -0,0 +1,50 @@ +// Package computestorage is a wrapper around the HCS storage APIs. 
These are new storage APIs introduced +// separate from the original graphdriver calls intended to give more freedom around creating +// and managing container layers and scratch spaces. +package computestorage + +import ( + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go + +//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? +//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? +//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? +//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? +//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? +//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? +//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? +//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? +//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? +//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? + +// LayerData is the data used to describe parent layer information. +type LayerData struct { + SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` +} + +// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. +type ExportLayerOptions struct { + IsWritableLayer bool `json:"IsWritableLayer,omitempty"` +} + +// OsLayerType is the type of layer being operated on. +type OsLayerType string + +const ( + // OsLayerTypeContainer is a container layer. + OsLayerTypeContainer OsLayerType = "Container" + // OsLayerTypeVM is a virtual machine layer. + OsLayerTypeVM OsLayerType = "Vm" +) + +// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and +// `SetupBaseOSVolume` calls. +type OsLayerOptions struct { + Type OsLayerType `json:"Type,omitempty"` + DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` + SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go new file mode 100644 index 00000000000..4f951806743 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -0,0 +1,319 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package computestorage + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") + + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") + procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") + procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") +) + +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) +} + +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p3 *uint16 + _p3, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsExportLayer(_p0, _p1, _p2, _p3) +} + +func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDestroyLayer(_p0) +} + +func _hcsDestroyLayer(layerPath *uint16) (hr error) { + if hr = procHcsDestoryLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) 
+ if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSLayer(_p0, handle, _p1) +} + +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsInitializeWritableLayer(_p0, _p1, _p2) +} + +func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsInitializeWritableLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSVolume(_p0, _p1, _p2) +} + +func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go new file mode 100644 index 00000000000..bfd722898e9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -0,0 +1,223 @@ +package hcsshim + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + "github.com/Microsoft/hcsshim/internal/mergemaps" +) + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties = schema1.ContainerProperties + +// MemoryStats holds the memory statistics for a container +type MemoryStats = schema1.MemoryStats + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats = schema1.ProcessorStats + +// StorageStats holds the storage statistics for a container +type StorageStats = schema1.StorageStats + +// NetworkStats holds the network statistics for a container +type NetworkStats = schema1.NetworkStats + +// Statistics is the structure returned by a statistics call on a container +type Statistics = schema1.Statistics + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem = schema1.ProcessListItem + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController = schema1.MappedVirtualDiskController + +// Type of Request Support in ModifySystem +type RequestType = schema1.RequestType + +// Type of Resource Support in ModifySystem +type ResourceType = schema1.ResourceType + +// RequestType const +const ( + Add = schema1.Add + Remove = schema1.Remove + Network = schema1.Network +) + +// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse + +type container struct { + system *hcs.System + waitOnce sync.Once + waitErr error + waitCh chan struct{} +} + +// createComputeSystemAdditionalJSON is read 
from the environment at initialisation +// time. It allows an environment variable to define additional JSON which +// is merged in the CreateComputeSystem call to HCS. +var createContainerAdditionalJSON []byte + +func init() { + createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")) +} + +// CreateContainer creates a new container with the given configuration but does not start it. +func CreateContainer(id string, c *ContainerConfig) (Container, error) { + fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) + if err != nil { + return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) + } + + system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) + if err != nil { + return nil, err + } + return &container{system: system}, err +} + +// OpenContainer opens an existing container by ID. +func OpenContainer(id string) (Container, error) { + system, err := hcs.OpenComputeSystem(context.Background(), id) + if err != nil { + return nil, err + } + return &container{system: system}, err +} + +// GetContainers gets a list of the containers on the system that match the query +func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { + return hcs.GetComputeSystems(context.Background(), q) +} + +// Start synchronously starts the container. +func (container *container) Start() error { + return convertSystemError(container.system.Start(context.Background()), container) +} + +// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. +func (container *container) Shutdown() error { + err := container.system.Shutdown(context.Background()) + if err != nil { + return convertSystemError(err, container) + } + return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"} +} + +// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. +func (container *container) Terminate() error { + err := container.system.Terminate(context.Background()) + if err != nil { + return convertSystemError(err, container) + } + return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"} +} + +// Waits synchronously waits for the container to shutdown or terminate. +func (container *container) Wait() error { + err := container.system.Wait() + if err == nil { + err = container.system.ExitError() + } + return convertSystemError(err, container) +} + +// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It +// returns false if timeout occurs. +func (container *container) WaitTimeout(timeout time.Duration) error { + container.waitOnce.Do(func() { + container.waitCh = make(chan struct{}) + go func() { + container.waitErr = container.Wait() + close(container.waitCh) + }() + }) + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-t.C: + return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"} + case <-container.waitCh: + return container.waitErr + } +} + +// Pause pauses the execution of a container. +func (container *container) Pause() error { + return convertSystemError(container.system.Pause(context.Background()), container) +} + +// Resume resumes the execution of a container. 
+// Resume resumes the execution of a container.
+func (container *container) Resume() error {
+	return convertSystemError(container.system.Resume(context.Background()), container)
+}
+
+// HasPendingUpdates returns true if the container has updates pending to install.
+// The current implementation is a stub that always reports false.
+func (container *container) HasPendingUpdates() (bool, error) {
+	return false, nil
+}
+
+// Statistics returns statistics for the container. This is a legacy v1 call
+func (container *container) Statistics() (Statistics, error) {
+	properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)
+	if err != nil {
+		return Statistics{}, convertSystemError(err, container)
+	}
+
+	return properties.Statistics, nil
+}
+
+// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
+func (container *container) ProcessList() ([]ProcessListItem, error) {
+	properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+
+	return properties.ProcessList, nil
+}
+
+// MappedVirtualDisks returns the controllers and virtual disks mapped to the container. This is a legacy v1 call
+func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
+	properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+
+	return properties.MappedVirtualDiskControllers, nil
+}
+
+// CreateProcess launches a new process within the container.
+func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
+	p, err := container.system.CreateProcess(context.Background(), c)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+	return &process{p: p.(*hcs.Process)}, nil
+}
+
+// OpenProcess gets an interface to an existing process within the container.
+func (container *container) OpenProcess(pid int) (Process, error) {
+	p, err := container.system.OpenProcess(context.Background(), pid)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+	return &process{p: p}, nil
+}
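Editor's note: taken together, these methods form the legacy v1 container lifecycle: create, start, run processes, request shutdown, then wait. A hedged sketch of how a caller strings them together (Windows-only; the ContainerConfig contents and command line are placeholders, not values this patch prescribes):

package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func run(id string, cfg *hcsshim.ContainerConfig) error {
	c, err := hcsshim.CreateContainer(id, cfg) // created, not yet running
	if err != nil {
		return err
	}
	defer c.Close() // releases the handle; does not stop the container

	if err := c.Start(); err != nil {
		return err
	}

	p, err := c.CreateProcess(&hcsshim.ProcessConfig{CommandLine: "cmd /c echo hello"})
	if err != nil {
		return err
	}
	defer p.Close()

	if err := p.Wait(); err != nil { // block until the process exits
		return err
	}

	// Shutdown completes asynchronously and reports ErrVmcomputeOperationPending
	// by design; Wait() blocks until the container is actually down.
	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
		return err
	}
	return c.Wait()
}

func main() {
	log.Println("illustrative only; requires a prepared ContainerConfig on a Windows host")
}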
+// Close cleans up any state associated with the container but does not terminate or wait for it.
+func (container *container) Close() error {
+	return convertSystemError(container.system.Close(), container)
+}
+
+// Modify sends a modification request to the container's compute system.
+func (container *container) Modify(config *ResourceModificationRequestResponse) error {
+	return convertSystemError(container.system.Modify(context.Background(), config), container)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go
new file mode 100644
index 00000000000..f367022e712
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/errors.go
@@ -0,0 +1,245 @@
+package hcsshim
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/Microsoft/hcsshim/internal/hns"
+
+	"github.com/Microsoft/hcsshim/internal/hcs"
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+)
+
+var (
+	// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
+	ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist
+
+	// ErrElementNotFound is an error encountered when the object being referenced does not exist
+	ErrElementNotFound = hcs.ErrElementNotFound
+
+	// ErrNotSupported is an error encountered when the requested operation is unsupported
+	ErrNotSupported = hcs.ErrNotSupported
+
+	// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
+	// decimal -2147024883 / hex 0x8007000d
+	ErrInvalidData = hcs.ErrInvalidData
+
+	// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
+	ErrHandleClose = hcs.ErrHandleClose
+
+	// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
+	ErrAlreadyClosed = hcs.ErrAlreadyClosed
+
+	// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
+	ErrInvalidNotificationType = hcs.ErrInvalidNotificationType
+
+	// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
+	ErrInvalidProcessState = hcs.ErrInvalidProcessState
+
+	// ErrTimeout is an error encountered when waiting on a notification times out
+	ErrTimeout = hcs.ErrTimeout
+
+	// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
+	// a different expected notification
+	ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit
+
+	// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
+	// is lost while waiting for a notification
+	ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort
+
+	// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
+	ErrUnexpectedValue = hcs.ErrUnexpectedValue
+
+	// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
+	ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
+
+	// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
+	ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending
+
+	// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
+	ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
+
+	// ErrProcNotFound is an error encountered when a procedure lookup fails.
+	ErrProcNotFound = hcs.ErrProcNotFound
+
+	// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
+	// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
+	ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied
+
+	// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+	ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON
+
+	// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+	ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage
+
+	// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+	ErrPlatformNotSupported = hcs.ErrPlatformNotSupported
+)
+
+type EndpointNotFoundError = hns.EndpointNotFoundError
+type NetworkNotFoundError = hns.NetworkNotFoundError
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+	Process   *process
+	Operation string
+	Err       error
+	Events    []hcs.ErrorEvent
+}
+
+// ContainerError is an error encountered in HCS during an operation on a Container object
+type ContainerError struct {
+	Container *container
+	Operation string
+	Err       error
+	Events    []hcs.ErrorEvent
+}
+
+func (e *ContainerError) Error() string {
+	if e == nil {
+		return ""
+	}
+
+	if e.Container == nil {
+		return "unexpected nil container for error: " + e.Err.Error()
+	}
+
+	s := "container " + e.Container.system.ID()
+
+	if e.Operation != "" {
+		s += " encountered an error during " + e.Operation
+	}
+
+	switch e.Err.(type) {
+	case nil:
+		break
+	case syscall.Errno:
+		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
+	default:
+		s += fmt.Sprintf(": %s", e.Err.Error())
+	}
+
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+
+	return s
+}
+
+func (e *ProcessError) Error() string {
+	if e == nil {
+		return ""
+	}
+
+	if e.Process == nil {
+		return "unexpected nil process for error: " + e.Err.Error()
+	}
+
+	s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID())
+	if e.Operation != "" {
+		s += " encountered an error during " + e.Operation
+	}
+
+	switch e.Err.(type) {
+	case nil:
+		break
+	case syscall.Errno:
+		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
+	default:
+		s += fmt.Sprintf(": %s", e.Err.Error())
+	}
+
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+
+	return s
+}
+
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsNotExist(err error) bool {
+	if _, ok := err.(EndpointNotFoundError); ok {
+		return true
+	}
+	if _, ok := err.(NetworkNotFoundError); ok {
+		return true
+	}
+	return hcs.IsNotExist(getInnerError(err))
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+	return hcs.IsAlreadyClosed(getInnerError(err))
+}
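Editor's note: these predicates (and the ones that follow) exist so callers classify failures without unwrapping ContainerError or ProcessError themselves; each one strips the wrapper via getInnerError and matches the inner HCS error. A hedged sketch of the intended consumption, assuming the Container interface defined elsewhere in this vendored package (stopContainer is a hypothetical helper):

package hcsutil

import (
	"github.com/Microsoft/hcsshim"
)

// stopContainer tears a container down, treating the asynchronous-completion
// and already-gone cases as success rather than as failures.
func stopContainer(c hcsshim.Container) error {
	err := c.Terminate()
	switch {
	case err == nil:
		return nil
	case hcsshim.IsPending(err):
		return c.Wait() // termination is completing in the background; block until done
	case hcsshim.IsAlreadyStopped(err) || hcsshim.IsNotExist(err):
		return nil // nothing left to stop
	default:
		return err
	}
}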
+// IsPending returns a boolean indicating whether the error is that
+// the requested operation is being completed in the background.
+func IsPending(err error) bool {
+	return hcs.IsPending(getInnerError(err))
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+	return hcs.IsTimeout(getInnerError(err))
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsAlreadyStopped(err error) bool {
+	return hcs.IsAlreadyStopped(getInnerError(err))
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// unsupported platform requests.
+// Note: Currently an unsupported platform request can mean any of
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
+// being thrown from the Platform
+func IsNotSupported(err error) bool {
+	return hcs.IsNotSupported(getInnerError(err))
+}
+
+// IsOperationInvalidState returns true when err is caused by
+// `ErrVmcomputeOperationInvalidState`.
+func IsOperationInvalidState(err error) bool {
+	return hcs.IsOperationInvalidState(getInnerError(err))
+}
+
+// IsAccessIsDenied returns true when err is caused by
+// `ErrVmcomputeOperationAccessIsDenied`.
+func IsAccessIsDenied(err error) bool {
+	return hcs.IsAccessIsDenied(getInnerError(err))
+}
+
+func getInnerError(err error) error {
+	switch pe := err.(type) {
+	case nil:
+		return nil
+	case *ContainerError:
+		err = pe.Err
+	case *ProcessError:
+		err = pe.Err
+	}
+	return err
+}
+
+func convertSystemError(err error, c *container) error {
+	if serr, ok := err.(*hcs.SystemError); ok {
+		return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events}
+	}
+	return err
+}
+
+func convertProcessError(err error, p *process) error {
+	if perr, ok := err.(*hcs.ProcessError); ok {
+		return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
+	}
+	return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
new file mode 100644
index 00000000000..ce6edbcf329
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
@@ -0,0 +1,12 @@
+# Requirements so far:
+# dockerd running
+#  - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar
+#  - image alpine (linux) docker pull --platform=linux alpine
+
+
+# TODO: Add this as a parameter for debugging, i.e. "functional-tests -debug=$true"
+#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please"
+
+#pushd uvm
+go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./...
+#popd \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod new file mode 100644 index 00000000000..9c60dd30251 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/go.mod @@ -0,0 +1,39 @@ +module github.com/Microsoft/hcsshim + +go 1.13 + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/Microsoft/go-winio v0.4.17 + github.com/cenkalti/backoff/v4 v4.1.1 + github.com/containerd/cgroups v1.0.1 + github.com/containerd/console v1.0.2 + github.com/containerd/containerd v1.5.7 + github.com/containerd/go-runc v1.0.0 + github.com/containerd/ttrpc v1.1.0 + github.com/containerd/typeurl v1.0.2 + github.com/gogo/protobuf v1.3.2 + github.com/golang/mock v1.6.0 + github.com/google/go-cmp v0.5.6 + github.com/google/go-containerregistry v0.5.1 + github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 + github.com/mattn/go-shellwords v1.0.6 + github.com/opencontainers/runc v1.0.2 + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.8.1 + github.com/urfave/cli v1.22.2 + github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 + github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae + go.etcd.io/bbolt v1.3.6 + go.opencensus.io v0.22.3 + golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e + google.golang.org/grpc v1.40.0 +) + +replace ( + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 + google.golang.org/grpc => google.golang.org/grpc v1.27.1 +) diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum new file mode 100644 index 00000000000..93c37657f3c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/go.sum @@ -0,0 +1,993 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= 
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod 
h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod 
h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod 
h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 
h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= 
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 h1:Cvj7S8I4Xpx78KAl6TwTmMHuHlZ/0SM60NUneGJQ7IE= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= 
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod 
h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod 
h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod 
h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= 
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod 
h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod 
h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go new file mode 100644 index 00000000000..ceb3ac85ee4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go @@ -0,0 +1,28 @@ +// Shim for the Host Compute Service (HCS) to manage Windows Server +// containers and Hyper-V containers. 
+ +package hcsshim + +import ( + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" +) + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go + +//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId + +const ( + // Specific user-visible exit codes + WaitErrExecFailed = 32767 + + ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE + ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) + WSAEINVAL = syscall.Errno(10022) + + // Timeout on wait calls + TimeoutInfinite = 0xFFFFFFFF +) + +type HcsError = hcserror.HcsError diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go new file mode 100644 index 00000000000..9e0059447d9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -0,0 +1,118 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint = hns.HNSEndpoint + +// HNSEndpointStats represents the stats for a network endpoint in HNS +type HNSEndpointStats = hns.EndpointStats + +// Namespace represents a Compartment. +type Namespace = hns.Namespace + +// SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" ) + +// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest + +// EndpointResquestResponse is the object used to get the endpoint request response +type EndpointResquestResponse = hns.EndpointResquestResponse + +// HNSEndpointRequest makes an HNS call to modify/query a network endpoint +func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { + return hns.HNSEndpointRequest(method, path, request) +} + +// HNSListEndpointRequest makes an HNS call to query the list of available endpoints +func HNSListEndpointRequest() ([]HNSEndpoint, error) { + return hns.HNSListEndpointRequest() +} + +// HotAttachEndpoint makes an HCS call to attach the endpoint to the container +func HotAttachEndpoint(containerID string, endpointID string) error { + endpoint, err := GetHNSEndpointByID(endpointID) + if err != nil { + return err + } + isAttached, err := endpoint.IsAttached(containerID) + if isAttached { + return err + } + return modifyNetworkEndpoint(containerID, endpointID, Add) +} + +// HotDetachEndpoint makes an HCS call to detach the endpoint from the container +func HotDetachEndpoint(containerID string, endpointID string) error { + endpoint, err := GetHNSEndpointByID(endpointID) + if err != nil { + return err + } + isAttached, err := endpoint.IsAttached(containerID) + if !isAttached { + return err + } + return modifyNetworkEndpoint(containerID, endpointID, Remove) +} + +// modifyContainer modifies the container corresponding to the given id by sending a request +func modifyContainer(id string, request *ResourceModificationRequestResponse) error { + container, err := OpenContainer(id) + if err != nil { + if IsNotExist(err) { + return ErrComputeSystemDoesNotExist + } + return getInnerError(err) + } + defer container.Close() + err = container.Modify(request) + if err != nil { + if IsNotSupported(err) { + return ErrPlatformNotSupported + } + return 
getInnerError(err) + } + + return nil +} + +func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error { + requestMessage := &ResourceModificationRequestResponse{ + Resource: Network, + Request: request, + Data: endpointID, + } + err := modifyContainer(containerID, requestMessage) + + if err != nil { + return err + } + + return nil +} + +// GetHNSEndpointByID gets the endpoint by ID +func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { + return hns.GetHNSEndpointByID(endpointID) +} + +// GetHNSEndpointByName gets the endpoint filtered by Name +func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { + return hns.GetHNSEndpointByName(endpointName) +} + +// GetHNSEndpointStats gets the endpoint stats by ID +func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { + return hns.GetHNSEndpointStats(endpointName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go new file mode 100644 index 00000000000..2b538190476 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go @@ -0,0 +1,16 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +type HNSGlobals = hns.HNSGlobals +type HNSVersion = hns.HNSVersion + +var ( + HNSVersion1803 = hns.HNSVersion1803 +) + +func GetHNSGlobals() (*HNSGlobals, error) { + return hns.GetHNSGlobals() +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go new file mode 100644 index 00000000000..f775fa1d07c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go @@ -0,0 +1,36 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +// Subnet is associated with a network and represents a list +// of subnets available to the network +type Subnet = hns.Subnet + +// MacPool is associated with a network and represents a list +// of MAC addresses available to the network +type MacPool = hns.MacPool + +// HNSNetwork represents a network in HNS +type HNSNetwork = hns.HNSNetwork + +// HNSNetworkRequest makes a call into HNS to update/query a single network +func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { + return hns.HNSNetworkRequest(method, path, request) +} + +// HNSListNetworkRequest makes an HNS call to query the list of available networks +func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { + return hns.HNSListNetworkRequest(method, path, request) +} + +// GetHNSNetworkByID gets the network by ID +func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { + return hns.GetHNSNetworkByID(networkID) +} + +// GetHNSNetworkByName gets the network filtered by Name +func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { + return hns.GetHNSNetworkByName(networkName) +}
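As an aside, a minimal usage sketch of the HNS network wrappers above (illustrative only, not part of the vendored file): the network name "nat" and the demo package name are assumptions, and this builds only on Windows.

package hnsdemo

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

// listNatEndpoints prints every HNS endpoint attached to the (hypothetical) "nat" network.
func listNatEndpoints() error {
	network, err := hcsshim.GetHNSNetworkByName("nat") // hypothetical network name
	if err != nil {
		return err
	}
	endpoints, err := hcsshim.HNSListEndpointRequest()
	if err != nil {
		return err
	}
	for _, ep := range endpoints {
		// An HNSEndpoint records the GUID of its network in VirtualNetwork.
		if ep.VirtualNetwork == network.Id {
			fmt.Printf("endpoint %s is attached to %s\n", ep.Name, network.Name)
		}
	}
	return nil
}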
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go new file mode 100644 index 00000000000..00ab2636449 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go @@ -0,0 +1,60 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +// PolicyType is the type of policy request supported in ModifySystem +type PolicyType = hns.PolicyType + +// RequestType const +const ( + Nat = hns.Nat + ACL = hns.ACL + PA = hns.PA + VLAN = hns.VLAN + VSID = hns.VSID + VNet = hns.VNet + L2Driver = hns.L2Driver + Isolation = hns.Isolation + QOS = hns.QOS + OutboundNat = hns.OutboundNat + ExternalLoadBalancer = hns.ExternalLoadBalancer + Route = hns.Route + Proxy = hns.Proxy +) + +type ProxyPolicy = hns.ProxyPolicy + +type NatPolicy = hns.NatPolicy + +type QosPolicy = hns.QosPolicy + +type IsolationPolicy = hns.IsolationPolicy + +type VlanPolicy = hns.VlanPolicy + +type VsidPolicy = hns.VsidPolicy + +type PaPolicy = hns.PaPolicy + +type OutboundNatPolicy = hns.OutboundNatPolicy + +type ActionType = hns.ActionType +type DirectionType = hns.DirectionType +type RuleType = hns.RuleType + +const ( + Allow = hns.Allow + Block = hns.Block + + In = hns.In + Out = hns.Out + + Host = hns.Host + Switch = hns.Switch +) + +type ACLPolicy = hns.ACLPolicy + +type Policy = hns.Policy diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go new file mode 100644 index 00000000000..55aaa4a50ef --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go @@ -0,0 +1,47 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +// RoutePolicy is a structure defining schema for Route based Policy +type RoutePolicy = hns.RoutePolicy + +// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy +type ELBPolicy = hns.ELBPolicy + +// LBPolicy is a structure defining schema for LoadBalancing based Policy +type LBPolicy = hns.LBPolicy + +// PolicyList is a structure defining schema for Policy list request +type PolicyList = hns.PolicyList + +// HNSPolicyListRequest makes a call into HNS to update/query a single policy list +func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { + return hns.HNSPolicyListRequest(method, path, request) +} + +// HNSListPolicyListRequest gets all the policy lists +func HNSListPolicyListRequest() ([]PolicyList, error) { + return hns.HNSListPolicyListRequest() +} + +// PolicyListRequest makes an HNS call to modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + return hns.PolicyListRequest(method, path, request) +} + +// GetPolicyListByID gets the policy list by ID +func GetPolicyListByID(policyListID string) (*PolicyList, error) { + return hns.GetPolicyListByID(policyListID) +} + +// AddLoadBalancer adds a load balancer policy list for the specified endpoints +func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { + return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) +} + +// AddRoute adds route policy list for the specified endpoints +func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { + return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go new file mode 100644 index 00000000000..69405244b67 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnssupport.go @@ -0,0 +1,13 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +type HNSSupportedFeatures = hns.HNSSupportedFeatures + +type HNSAclFeatures = hns.HNSAclFeatures + +func GetHNSSupportedFeatures() HNSSupportedFeatures { + return hns.GetHNSSupportedFeatures() +}
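A hedged sketch of how the load-balancer helper above might be driven. The package name, VIPs, and ports are invented for illustration; protocol 6 is TCP.

package lbdemo

import "github.com/Microsoft/hcsshim"

// addHTTPLoadBalancer balances external TCP :80 across the given endpoints'
// internal :8080, fronted by a hypothetical VIP 10.0.0.100 (source VIP 10.0.0.2).
func addHTTPLoadBalancer(endpoints []hcsshim.HNSEndpoint) (*hcsshim.PolicyList, error) {
	// isILB=false: an external load balancer rather than an internal one.
	return hcsshim.AddLoadBalancer(endpoints, false, "10.0.0.2", "10.0.0.100", 6, 8080, 80)
}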
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go new file mode 100644 index 00000000000..300eb599668 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/interface.go @@ -0,0 +1,114 @@ +package hcsshim + +import ( + "io" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs/schema1" +) + +// ProcessConfig is used as both the input of Container.CreateProcess +// and to convert the parameters to JSON for passing onto the HCS +type ProcessConfig = schema1.ProcessConfig + +type Layer = schema1.Layer +type MappedDir = schema1.MappedDir +type MappedPipe = schema1.MappedPipe +type HvRuntime = schema1.HvRuntime +type MappedVirtualDisk = schema1.MappedVirtualDisk + +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice = schema1.AssignedDevice + +// ContainerConfig is used as both the input of CreateContainer +// and to convert the parameters to JSON for passing onto the HCS +type ContainerConfig = schema1.ContainerConfig + +type ComputeSystemQuery = schema1.ComputeSystemQuery + +// Container represents a created (but not necessarily running) container. +type Container interface { + // Start synchronously starts the container. + Start() error + + // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. + Shutdown() error + + // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. + Terminate() error + + // Wait synchronously waits for the container to shutdown or terminate. + Wait() error + + // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It + // returns an error if the timeout occurs. + WaitTimeout(time.Duration) error + + // Pause pauses the execution of a container. + Pause() error + + // Resume resumes the execution of a container. + Resume() error + + // HasPendingUpdates returns true if the container has updates pending to install. + HasPendingUpdates() (bool, error) + + // Statistics returns statistics for a container. + Statistics() (Statistics, error) + + // ProcessList returns details for the processes in a container. + ProcessList() ([]ProcessListItem, error) + + // MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller + MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) + + // CreateProcess launches a new process within the container. + CreateProcess(c *ProcessConfig) (Process, error) + + // OpenProcess gets an interface to an existing process within the container. + OpenProcess(pid int) (Process, error) + + // Close cleans up any state associated with the container but does not terminate or wait for it. + Close() error + + // Modify the System + Modify(config *ResourceModificationRequestResponse) error +} + +// Process represents a running or exited process. +type Process interface { + // Pid returns the process ID of the process within the container. + Pid() int + + // Kill signals the process to terminate but does not wait for it to finish terminating. + Kill() error + + // Wait waits for the process to exit. + Wait() error + + // WaitTimeout waits for the process to exit or the duration to elapse. It returns + // an error if the timeout occurs. + WaitTimeout(time.Duration) error + + // ExitCode returns the exit code of the process. The process must have + // already terminated. + ExitCode() (int, error) + + // ResizeConsole resizes the console of the process. + ResizeConsole(width, height uint16) error + + // Stdio returns the stdin, stdout, and stderr pipes, respectively. 
Closing + // these pipes does not close the underlying pipes; it should be possible to + // call this multiple times to get multiple interfaces. + Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) + + // CloseStdin closes the write side of the stdin pipe so that the process is + // notified on the read side that there is no more data in stdin. + CloseStdin() error + + // Close cleans up any state associated with the process but does not kill + // or wait on it. + Close() error +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go new file mode 100644 index 00000000000..27a62a72386 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -0,0 +1,91 @@ +package cow + +import ( + "context" + "io" + + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// Process is the interface for an OS process running in a container or utility VM. +type Process interface { + // Close releases resources associated with the process and closes the + // writer and readers returned by Stdio. Depending on the implementation, + // this may also terminate the process. + Close() error + // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever + // is appropriate to indicate that no more data is available. + CloseStdin(ctx context.Context) error + // CloseStdout closes the stdout connection to the process. It is used to indicate + // that we are done receiving output on the shim side. + CloseStdout(ctx context.Context) error + // CloseStderr closes the stderr connection to the process. It is used to indicate + // that we are done receiving output on the shim side. + CloseStderr(ctx context.Context) error + // Pid returns the process ID. + Pid() int + // Stdio returns the stdio streams for a process. These may be nil if a stream + // was not requested during CreateProcess. + Stdio() (_ io.Writer, _ io.Reader, _ io.Reader) + // ResizeConsole resizes the virtual terminal associated with the process. + ResizeConsole(ctx context.Context, width, height uint16) error + // Kill sends a SIGKILL or equivalent signal to the process and returns whether + // the signal was delivered. It does not wait for the process to terminate. + Kill(ctx context.Context) (bool, error) + // Signal sends a signal to the process and returns whether the signal was + // delivered. The input is OS specific (either + // guestrequest.SignalProcessOptionsWCOW or + // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process + // to terminate. + Signal(ctx context.Context, options interface{}) (bool, error) + // Wait waits for the process to complete, or for a connection to the process to be + // terminated by some error condition (including calling Close). + Wait() error + // ExitCode returns the exit code of the process. Returns an error if the process is + // not running. + ExitCode() (int, error) +} + +// ProcessHost is the interface for creating processes. +type ProcessHost interface { + // CreateProcess creates a process. The configuration is host specific + // (either hcsschema.ProcessParameters or lcow.ProcessParameters). + CreateProcess(ctx context.Context, config interface{}) (Process, error) + // OS returns the host's operating system, "linux" or "windows". + OS() string + // IsOCI specifies whether this is an OCI-compliant process host. 
If true, + // then the configuration passed to CreateProcess should have an OCI process + // spec (or nil if this is the initial process in an OCI container). + // Otherwise, it should have the HCS-specific process parameters. + IsOCI() bool +} + +// Container is the interface for container objects, either running on the host or +// in a utility VM. +type Container interface { + ProcessHost + // Close releases the resources associated with the container. Depending on + // the implementation, this may also terminate the container. + Close() error + // ID returns the container ID. + ID() string + // Properties returns the requested container properties targeting a V1 schema container. + Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) + // PropertiesV2 returns the requested container properties targeting a V2 schema container. + PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) + // Start starts a container. + Start(ctx context.Context) error + // Shutdown sends a shutdown request to the container (but does not wait for + // the shutdown to complete). + Shutdown(ctx context.Context) error + // Terminate sends a terminate request to the container (but does not wait + // for the terminate to complete). + Terminate(ctx context.Context) error + // Wait waits for the container to terminate, or for the connection to the + // container to be terminated by some error condition (including calling + // Close). + Wait() error + // Modify sends a request to modify container resources + Modify(ctx context.Context, config interface{}) error +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go new file mode 100644 index 00000000000..d13772b0301 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -0,0 +1,161 @@ +package hcs + +import ( + "fmt" + "sync" + "syscall" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "github.com/sirupsen/logrus" +) + +var ( + nextCallback uintptr + callbackMap = map[uintptr]*notificationWatcherContext{} + callbackMapLock = sync.RWMutex{} + + notificationWatcherCallback = syscall.NewCallback(notificationWatcher) + + // Notifications for HCS_SYSTEM handles + hcsNotificationSystemExited hcsNotification = 0x00000001 + hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 + hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 + hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 + hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 + hcsNotificationSystemCrashReport hcsNotification = 0x00000006 + hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 + hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 + hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 + hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A + hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B + hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C + hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D + hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E + + // Notifications for HCS_PROCESS handles + hcsNotificationProcessExited hcsNotification = 0x00010000 + + // Common notifications + 
hcsNotificationInvalid hcsNotification = 0x00000000 + hcsNotificationServiceDisconnect hcsNotification = 0x01000000 +) + +type hcsNotification uint32 + +func (hn hcsNotification) String() string { + switch hn { + case hcsNotificationSystemExited: + return "SystemExited" + case hcsNotificationSystemCreateCompleted: + return "SystemCreateCompleted" + case hcsNotificationSystemStartCompleted: + return "SystemStartCompleted" + case hcsNotificationSystemPauseCompleted: + return "SystemPauseCompleted" + case hcsNotificationSystemResumeCompleted: + return "SystemResumeCompleted" + case hcsNotificationSystemCrashReport: + return "SystemCrashReport" + case hcsNotificationSystemSiloJobCreated: + return "SystemSiloJobCreated" + case hcsNotificationSystemSaveCompleted: + return "SystemSaveCompleted" + case hcsNotificationSystemRdpEnhancedModeStateChanged: + return "SystemRdpEnhancedModeStateChanged" + case hcsNotificationSystemShutdownFailed: + return "SystemShutdownFailed" + case hcsNotificationSystemGetPropertiesCompleted: + return "SystemGetPropertiesCompleted" + case hcsNotificationSystemModifyCompleted: + return "SystemModifyCompleted" + case hcsNotificationSystemCrashInitiated: + return "SystemCrashInitiated" + case hcsNotificationSystemGuestConnectionClosed: + return "SystemGuestConnectionClosed" + case hcsNotificationProcessExited: + return "ProcessExited" + case hcsNotificationInvalid: + return "Invalid" + case hcsNotificationServiceDisconnect: + return "ServiceDisconnect" + default: + return fmt.Sprintf("Unknown: %d", hn) + } +} + +type notificationChannel chan error + +type notificationWatcherContext struct { + channels notificationChannels + handle vmcompute.HcsCallback + + systemID string + processID int +} + +type notificationChannels map[hcsNotification]notificationChannel + +func newSystemChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationSystemExited, + hcsNotificationSystemCreateCompleted, + hcsNotificationSystemStartCompleted, + hcsNotificationSystemPauseCompleted, + hcsNotificationSystemResumeCompleted, + hcsNotificationSystemSaveCompleted, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func newProcessChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationProcessExited, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func closeChannels(channels notificationChannels) { + for _, c := range channels { + close(c) + } +} + +func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { + var result error + if int32(notificationStatus) < 0 { + result = interop.Win32FromHresult(notificationStatus) + } + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return 0 + } + + log := logrus.WithFields(logrus.Fields{ + "notification-type": notificationType.String(), + "system-id": context.systemID, + }) + if context.processID != 0 { + log.Data[logfields.ProcessID] = context.processID + } + log.Debug("HCS notification") + + if channel, ok := context.channels[notificationType]; ok { + channel <- result + } + + return 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go new file mode 100644 index 00000000000..e21354ffd66 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -0,0 +1,343 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "syscall" + + "github.com/Microsoft/hcsshim/internal/log" +) + +var ( + // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists + ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrElementNotFound = syscall.Errno(0x490) + + // ErrNotSupported is an error encountered when the requested operation is not supported + ErrNotSupported = syscall.Errno(0x32) + + // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported + // decimal -2147024883 / hex 0x8007000d + ErrInvalidData = syscall.Errno(0xd) + + // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed + ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") + + // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method + ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") + + // ErrInvalidNotificationType is an error encountered when an invalid notification type is used + ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") + + // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation + ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") + + // ErrTimeout is an error encountered when waiting on a notification times out + ErrTimeout = errors.New("hcsshim: timeout waiting for notification") + + // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for + // a different expected notification + ErrUnexpectedContainerExit = errors.New("unexpected container exit") + + // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service + // is lost while waiting for a notification + ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") + + // ErrUnexpectedValue is an error encountered when hcs returns an invalid value + ErrUnexpectedValue = errors.New("unexpected value returned from hcs") + + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container + ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) + + // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously + ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) + + // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation + ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) + + // ErrProcNotFound is an error encountered when a procedure look up fails. + ErrProcNotFound = syscall.Errno(0x7f) + + // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 + // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. 
+ ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5) + + // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management + ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d) + + // ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message + ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) + + // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly + ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) + + // ErrPlatformNotSupported is an error encountered when hcs doesn't support the request + ErrPlatformNotSupported = errors.New("unsupported platform request") + + // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. + ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) + + // ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system when the handle to that + // compute system has already been closed. + ErrInvalidHandle = syscall.Errno(0x6) +) + +type ErrorEvent struct { + Message string `json:"Message,omitempty"` // Fully formatted error message + StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form + Provider string `json:"Provider,omitempty"` + EventID uint16 `json:"EventId,omitempty"` + Flags uint32 `json:"Flags,omitempty"` + Source string `json:"Source,omitempty"` + //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function) +} + +type hcsResult struct { + Error int32 + ErrorMessage string + ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"` +} + +func (ev *ErrorEvent) String() string { + evs := "[Event Detail: " + ev.Message + if ev.StackTrace != "" { + evs += " Stack Trace: " + ev.StackTrace + } + if ev.Provider != "" { + evs += " Provider: " + ev.Provider + } + if ev.EventID != 0 { + evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID) + } + if ev.Flags != 0 { + evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags) + } + if ev.Source != "" { + evs += " Source: " + ev.Source + } + evs += "]" + return evs +} + +func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent { + if resultJSON != "" { + result := &hcsResult{} + if err := json.Unmarshal([]byte(resultJSON), result); err != nil { + log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result") + return nil + } + return result.ErrorEvents + } + return nil +} + +type HcsError struct { + Op string + Err error + Events []ErrorEvent +} + +var _ net.Error = &HcsError{} + +func (e *HcsError) Error() string { + s := e.Op + ": " + e.Err.Error() + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func (e *HcsError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() +} + +func (e *HcsError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +// ProcessError is an error encountered in HCS during an operation on a Process object +type ProcessError struct { + SystemID string + Pid int + Op string + Err error + Events []ErrorEvent +} + +var _ net.Error = &ProcessError{} + +// SystemError is an error encountered in HCS during an operation on a Container object +type SystemError struct { + ID string + Op string + Err error + Events []ErrorEvent +} + +var _ net.Error = &SystemError{} + +func (e *SystemError) Error() string { 
+ s := e.Op + " " + e.ID + ": " + e.Err.Error() + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func (e *SystemError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() +} + +func (e *SystemError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { + // Don't double wrap errors + if _, ok := err.(*SystemError); ok { + return err + } + return &SystemError{ + ID: system.ID(), + Op: op, + Err: err, + Events: events, + } +} + +func (e *ProcessError) Error() string { + s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func (e *ProcessError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() +} + +func (e *ProcessError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { + // Don't double wrap errors + if _, ok := err.(*ProcessError); ok { + return err + } + return &ProcessError{ + Pid: process.Pid(), + SystemID: process.SystemID(), + Op: op, + Err: err, + Events: events, + } +} + +// IsNotExist checks if an error is caused by the Container or Process not existing. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound. +func IsNotExist(err error) bool { + err = getInnerError(err) + return err == ErrComputeSystemDoesNotExist || + err == ErrElementNotFound +} + +// IsErrorInvalidHandle checks whether the error is the result of an operation carried +// out on a handle that is invalid/closed. This error popped up while trying to query +// stats on a container in the process of being stopped. +func IsErrorInvalidHandle(err error) bool { + err = getInnerError(err) + return err == ErrInvalidHandle +} + +// IsAlreadyClosed checks if an error is caused by the Container or Process having been +// already closed by a call to the Close() method. +func IsAlreadyClosed(err error) bool { + err = getInnerError(err) + return err == ErrAlreadyClosed +} + +// IsPending returns a boolean indicating whether the error is that +// the requested operation is being completed in the background. +func IsPending(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationPending +} + +// IsTimeout returns a boolean indicating whether the error is caused by +// a timeout waiting for the operation to complete. +func IsTimeout(err error) bool { + if err, ok := err.(net.Error); ok && err.Timeout() { + return true + } + err = getInnerError(err) + return err == ErrTimeout +}
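Taken together, these predicates let code collapse benign failure modes into success; a hedged sketch of a helper as it might appear inside package hcs (the helper name is hypothetical, not from this file):

// ignoreBenignShutdownErr treats "already gone" and "completing in the
// background" results from a shutdown attempt as success.
func ignoreBenignShutdownErr(err error) error {
	if err == nil || IsPending(err) || IsAlreadyStopped(err) || IsNotExist(err) {
		return nil
	}
	return err
}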
+ +// IsAlreadyStopped returns a boolean indicating whether the error is caused by +// a Container or Process being already stopped. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound. +func IsAlreadyStopped(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeAlreadyStopped || + err == ErrProcessAlreadyStopped || + err == ErrElementNotFound +} + +// IsNotSupported returns a boolean indicating whether the error is caused by +// unsupported platform requests +// Note: Currently an unsupported platform request can mean that either +// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage +// is thrown from the Platform +func IsNotSupported(err error) bool { + err = getInnerError(err) + // If Platform doesn't recognize or support the request sent, below errors are seen + return err == ErrVmcomputeInvalidJSON || + err == ErrInvalidData || + err == ErrNotSupported || + err == ErrVmcomputeUnknownMessage +} + +// IsOperationInvalidState returns true when err is caused by +// `ErrVmcomputeOperationInvalidState`. +func IsOperationInvalidState(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationInvalidState +} + +// IsAccessIsDenied returns true when err is caused by +// `ErrVmcomputeOperationAccessIsDenied`. +func IsAccessIsDenied(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationAccessIsDenied +} + +func getInnerError(err error) error { + switch pe := err.(type) { + case nil: + return nil + case *HcsError: + err = pe.Err + case *SystemError: + err = pe.Err + case *ProcessError: + err = pe.Err + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go new file mode 100644 index 00000000000..f4605922ab4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -0,0 +1,557 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "io" + "os" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "go.opencensus.io/trace" +) + +// Process represents a process running in a compute system managed by HCS +type Process struct { + handleLock sync.RWMutex + handle vmcompute.HcsProcess + processID int + system *System + hasCachedStdio bool + stdioLock sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + callbackNumber uintptr + killSignalDelivered bool + + closedWaitOnce sync.Once + waitBlock chan struct{} + exitCode int + waitError error +} + +func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { + return &Process{ + handle: process, + processID: processID, + system: computeSystem, + waitBlock: make(chan struct{}), + } +} + +type processModifyRequest struct { + Operation string + ConsoleSize *consoleSize `json:",omitempty"` + CloseHandle *closeHandle `json:",omitempty"` +} + +type consoleSize struct { + Height uint16 + Width uint16 +} + +type closeHandle struct { + Handle string +} + +type processStatus struct { + ProcessID uint32 + Exited bool + ExitCode uint32 + LastWaitResult int32 +} + +const stdIn string = "StdIn" + +const ( + modifyConsoleSize string = "ConsoleSize" + modifyCloseHandle string = "CloseHandle" +) + +// Pid returns the process ID of the process within the container. +func (process *Process) Pid() int { + return process.processID +}
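The closedWaitOnce/waitBlock pair in the Process struct above is the publish-once idiom: the exit result is recorded exactly once, and any number of goroutines may block on the channel. The same pattern in isolation, as a sketch (package and names are illustrative, not from this file):

package waitdemo

import "sync"

type exitWaiter struct {
	once sync.Once
	done chan struct{} // closed exactly once, after err is set
	err  error
}

func newExitWaiter() *exitWaiter { return &exitWaiter{done: make(chan struct{})} }

// finish publishes the result; subsequent calls are no-ops.
func (w *exitWaiter) finish(err error) {
	w.once.Do(func() {
		w.err = err
		close(w.done)
	})
}

// Wait blocks until finish has run, then returns the published error.
func (w *exitWaiter) Wait() error {
	<-w.done
	return w.err
}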
+ +// SystemID returns the ID of the process's compute system. +func (process *Process) SystemID() string { + return process.system.ID() +} + +func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { + switch err { + case nil: + return true, nil + case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: + select { + case <-process.waitBlock: + // The process exit notification has already arrived. + default: + // The process should be gone, but we have not received the notification. + // After a second, force unblock the process wait to work around a possible + // deadlock in the HCS. + go func() { + time.Sleep(time.Second) + process.closedWaitOnce.Do(func() { + log.G(ctx).WithError(err).Warn("force unblocking process waits") + process.exitCode = -1 + process.waitError = err + close(process.waitBlock) + }) + }() + } + return false, nil + default: + return false, err + } +} + +// Signal signals the process with `options`. +// +// For LCOW `guestrequest.SignalProcessOptionsLCOW`. +// +// For WCOW `guestrequest.SignalProcessOptionsWCOW`. +func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::Signal" + + if process.handle == 0 { + return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + optionsb, err := json.Marshal(options) + if err != nil { + return false, err + } + + resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb)) + events := processHcsResult(ctx, resultJSON) + delivered, err := process.processSignalResult(ctx, err) + if err != nil { + err = makeProcessError(process, operation, err, events) + } + return delivered, err +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. +func (process *Process) Kill(ctx context.Context) (bool, error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::Kill" + + if process.handle == 0 { + return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + if process.killSignalDelivered { + // A kill signal has already been sent to this process. Sending a second + // one offers no real benefit, as processes cannot stop themselves from + // being terminated, once a TerminateProcess has been issued. Sending a + // second kill may result in a number of errors (two of which detailed below) + // and which we can avoid handling. + return true, nil + } + + resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) + if err != nil { + // We still need to check these two cases, as processes may still be killed by an + // external actor (human operator, OOM, random script etc). + if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { + // There are two cases where it should be safe to ignore an error returned + // by HcsTerminateProcess. The first one is caused by the fact that + // HcsTerminateProcess ends up calling TerminateProcess in the context + // of a container. According to the TerminateProcess documentation: + // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks + // After a process has terminated, a call to TerminateProcess with open + // handles to the process fails with ERROR_ACCESS_DENIED (5) error code. + // It's safe to ignore this error here. HCS should always have permissions + // to kill processes inside any container. 
So an ERROR_ACCESS_DENIED + // is unlikely to be anything else than what the ending remarks in the + // documentation states. + // + // The second case is generated by hcs itself, if for any reason HcsTerminateProcess + // is called twice in a very short amount of time. In such cases, hcs may return + // HCS_E_PROCESS_ALREADY_STOPPED. + return true, nil + } + } + events := processHcsResult(ctx, resultJSON) + delivered, err := process.processSignalResult(ctx, err) + if err != nil { + err = makeProcessError(process, operation, err, events) + } + + process.killSignalDelivered = delivered + return delivered, err +} + +// waitBackground waits for the process exit notification. Once received, it sets +// `process.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `process.handle` but `Wait` is safe to +// call multiple times. +func (process *Process) waitBackground() { + operation := "hcs::Process::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + var ( + err error + exitCode = -1 + propertiesJSON string + resultJSON string + ) + + err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil) + if err != nil { + err = makeProcessError(process, operation, err, nil) + log.G(ctx).WithError(err).Error("failed wait") + } else { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + // Make sure we didn't race with Close() here + if process.handle != 0 { + propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) + events := processHcsResult(ctx, resultJSON) + if err != nil { + err = makeProcessError(process, operation, err, events) //nolint:ineffassign + } else { + properties := &processStatus{} + err = json.Unmarshal([]byte(propertiesJSON), properties) + if err != nil { + err = makeProcessError(process, operation, err, nil) //nolint:ineffassign + } else { + if properties.LastWaitResult != 0 { + log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") + } else { + exitCode = int(properties.ExitCode) + } + } + } + } + } + log.G(ctx).WithField("exitCode", exitCode).Debug("process exited") + + process.closedWaitOnce.Do(func() { + process.exitCode = exitCode + process.waitError = err + close(process.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +// Wait waits for the process to exit. If the process has already exited, it returns +// the previous error (if any). +func (process *Process) Wait() error { + <-process.waitBlock + return process.waitError +}
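A caller-side sketch of the contract just defined: Wait blocks until waitBackground closes waitBlock, after which ExitCode (defined below) can succeed. The wrapper function itself is hypothetical:

// runToCompletion blocks until the process exits, then returns its exit code.
func runToCompletion(p *Process) (int, error) {
	if err := p.Wait(); err != nil {
		return -1, err
	}
	return p.ExitCode()
}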
+ +// ResizeConsole resizes the console of the process. +func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::ResizeConsole" + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyConsoleSize, + ConsoleSize: &consoleSize{ + Height: height, + Width: width, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + +// ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *Process) ExitCode() (int, error) { + select { + case <-process.waitBlock: + if process.waitError != nil { + return -1, process.waitError + } + return process.exitCode, nil + default: + return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) + } +} + +// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes. Once returned, these pipes +// are the responsibility of the caller to close. +func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { + operation := "hcs::Process::StdioLegacy" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + if process.handle == 0 { + return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.hasCachedStdio { + stdin, stdout, stderr := process.stdin, process.stdout, process.stderr + process.stdin, process.stdout, process.stderr = nil, nil, nil + process.hasCachedStdio = false + return stdin, stdout, stderr, nil + } + + processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, events) + } + + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, nil) + } + + return pipes[0], pipes[1], pipes[2], nil +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. +// To close them, close the process handle. +func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + return process.stdin, process.stdout, process.stderr +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. 
+func (process *Process) CloseStdin(ctx context.Context) error { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::CloseStdin" + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyCloseHandle, + CloseHandle: &closeHandle{ + Handle: stdIn, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + process.stdioLock.Unlock() + + return nil +} + +func (process *Process) CloseStdout(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + return nil +} + +func (process *Process) CloseStderr(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + + } + return nil +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. 
+func (process *Process) Close() (err error) { + operation := "hcs::Process::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + // Don't double free this + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + } + process.stdioLock.Unlock() + + if err = process.unregisterCallback(ctx); err != nil { + return makeProcessError(process, operation, err, nil) + } + + if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { + return makeProcessError(process, operation, err, nil) + } + + process.handle = 0 + process.closedWaitOnce.Do(func() { + process.exitCode = -1 + process.waitError = ErrAlreadyClosed + close(process.waitBlock) + }) + + return nil +} + +func (process *Process) registerCallback(ctx context.Context) error { + callbackContext := ¬ificationWatcherContext{ + channels: newProcessChannels(), + systemID: process.SystemID(), + processID: process.processID, + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = callbackContext + callbackMapLock.Unlock() + + callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) + if err != nil { + return err + } + callbackContext.handle = callbackHandle + process.callbackNumber = callbackNumber + + return nil +} + +func (process *Process) unregisterCallback(ctx context.Context) error { + callbackNumber := process.callbackNumber + + callbackMapLock.RLock() + callbackContext := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if callbackContext == nil { + return nil + } + + handle := callbackContext.handle + + if handle == 0 { + return nil + } + + // vmcompute.HcsUnregisterProcessCallback has its own synchronization to + // wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
+ err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go new file mode 100644 index 00000000000..b621c559388 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go @@ -0,0 +1,250 @@ +package schema1 + +import ( + "encoding/json" + "time" + + "github.com/Microsoft/go-winio/pkg/guid" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// ProcessConfig is used as both the input of Container.CreateProcess +// and to convert the parameters to JSON for passing onto the HCS +type ProcessConfig struct { + ApplicationName string `json:",omitempty"` + CommandLine string `json:",omitempty"` + CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows + User string `json:",omitempty"` + WorkingDirectory string `json:",omitempty"` + Environment map[string]string `json:",omitempty"` + EmulateConsole bool `json:",omitempty"` + CreateStdInPipe bool `json:",omitempty"` + CreateStdOutPipe bool `json:",omitempty"` + CreateStdErrPipe bool `json:",omitempty"` + ConsoleSize [2]uint `json:",omitempty"` + CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows + OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows +} + +type Layer struct { + ID string + Path string +} + +type MappedDir struct { + HostPath string + ContainerPath string + ReadOnly bool + BandwidthMaximum uint64 + IOPSMaximum uint64 + CreateInUtilityVM bool + // LinuxMetadata - Support added in 1803/RS4+. + LinuxMetadata bool `json:",omitempty"` +} + +type MappedPipe struct { + HostPath string + ContainerPipeName string +} + +type HvRuntime struct { + ImagePath string `json:",omitempty"` + SkipTemplate bool `json:",omitempty"` + LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM + LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM + LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode + BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD + WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD +} + +type MappedVirtualDisk struct { + HostPath string `json:",omitempty"` // Path to VHD on the host + ContainerPath string // Platform-specific mount point path in the container + CreateInUtilityVM bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" + AttachOnly bool `json:",omitempty"` +} + +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice struct { + // InterfaceClassGUID of the device to assign to container. 
+ InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` +} + +// ContainerConfig is used as both the input of CreateContainer +// and to convert the parameters to JSON for passing onto the HCS +type ContainerConfig struct { + SystemType string // HCS requires this to be hard-coded to "Container" + Name string // Name of the container. We use the docker ID. + Owner string `json:",omitempty"` // The management platform that created this container + VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} + IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows + LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID + Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID + Credentials string `json:",omitempty"` // Credentials information + ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. + ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. + ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. + StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS + StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second + StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller + MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes + HostName string `json:",omitempty"` // Hostname + MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) + MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes + HvPartition bool // True if it is a Hyper-V Container + NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. + EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container + HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM + Servicing bool `json:",omitempty"` // True if this container is for servicing + AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution + DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution + ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. + TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed + MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start + AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. 
NOTE: Support added in RS5 +} + +type ComputeSystemQuery struct { + IDs []string `json:"Ids,omitempty"` + Types []string `json:",omitempty"` + Names []string `json:",omitempty"` + Owners []string `json:",omitempty"` +} + +type PropertyType string + +const ( + PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 + PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 +) + +type PropertyQuery struct { + PropertyTypes []PropertyType `json:",omitempty"` +} + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties struct { + ID string `json:"Id"` + State string + Name string + SystemType string + RuntimeOSType string `json:"RuntimeOsType,omitempty"` + Owner string + SiloGUID string `json:"SiloGuid,omitempty"` + RuntimeID guid.GUID `json:"RuntimeId,omitempty"` + IsRuntimeTemplate bool `json:",omitempty"` + RuntimeImagePath string `json:",omitempty"` + Stopped bool `json:",omitempty"` + ExitType string `json:",omitempty"` + AreUpdatesPending bool `json:",omitempty"` + ObRoot string `json:",omitempty"` + Statistics Statistics `json:",omitempty"` + ProcessList []ProcessListItem `json:",omitempty"` + MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` + GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` +} + +// MemoryStats holds the memory statistics for a container +type MemoryStats struct { + UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:",omitempty"` + RuntimeUser100ns uint64 `json:",omitempty"` + RuntimeKernel100ns uint64 `json:",omitempty"` +} + +// StorageStats holds the storage statistics for a container +type StorageStats struct { + ReadCountNormalized uint64 `json:",omitempty"` + ReadSizeBytes uint64 `json:",omitempty"` + WriteCountNormalized uint64 `json:",omitempty"` + WriteSizeBytes uint64 `json:",omitempty"` +} + +// NetworkStats holds the network statistics for a container +type NetworkStats struct { + BytesReceived uint64 `json:",omitempty"` + BytesSent uint64 `json:",omitempty"` + PacketsReceived uint64 `json:",omitempty"` + PacketsSent uint64 `json:",omitempty"` + DroppedPacketsIncoming uint64 `json:",omitempty"` + DroppedPacketsOutgoing uint64 `json:",omitempty"` + EndpointId string `json:",omitempty"` + InstanceId string `json:",omitempty"` +} + +// Statistics is the structure returned by a statistics call on a container +type Statistics struct { + Timestamp time.Time `json:",omitempty"` + ContainerStartTime time.Time `json:",omitempty"` + Uptime100ns uint64 `json:",omitempty"` + Memory MemoryStats `json:",omitempty"` + Processor ProcessorStats `json:",omitempty"` + Storage StorageStats `json:",omitempty"` + Network []NetworkStats `json:",omitempty"` +} + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem struct { + CreateTimestamp time.Time `json:",omitempty"` + ImageName string `json:",omitempty"` + KernelTime100ns uint64 `json:",omitempty"` + MemoryCommitBytes 
uint64 `json:",omitempty"` + MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` + MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` + ProcessId uint32 `json:",omitempty"` + UserTime100ns uint64 `json:",omitempty"` +} + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController struct { + MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` +} + +// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM +type GuestDefinedCapabilities struct { + NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` + DumpStacksSupported bool `json:",omitempty"` + DeleteContainerStateSupported bool `json:",omitempty"` + UpdateContainerSupported bool `json:",omitempty"` +} + +// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM +type GuestConnectionInfo struct { + SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` + ProtocolVersion uint32 `json:",omitempty"` + GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` +} + +// Type of Request Support in ModifySystem +type RequestType string + +// Type of Resource Support in ModifySystem +type ResourceType string + +// RequestType const +const ( + Add RequestType = "Add" + Remove RequestType = "Remove" + Network ResourceType = "Network" +) + +// ResourceModificationRequestResponse is the structure used to send a request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse struct { + Resource ResourceType `json:"ResourceType"` + Data interface{} `json:"Settings"` + Request RequestType `json:"RequestType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go new file mode 100644 index 00000000000..70884aad75f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Attachment struct { + Type_ string `json:"Type,omitempty"` + + Path string `json:"Path,omitempty"` + + IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` + + CachingMode string `json:"CachingMode,omitempty"` + + NoWriteHardening bool `json:"NoWriteHardening,omitempty"` + + DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` + + IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` + + CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` + + AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` + + ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go new file mode 100644 index 00000000000..ecbbed4c233 --- /dev/null +++
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Battery struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go new file mode 100644 index 00000000000..c1ea3953b58 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CacheQueryStatsResponse struct { + L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` + + L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` + + L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go new file mode 100644 index 00000000000..ca75277a3f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Chipset struct { + Uefi *Uefi `json:"Uefi,omitempty"` + + IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` + + BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` + + ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` + + ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` + + UseUtc bool `json:"UseUtc,omitempty"` + + // LinuxKernelDirect - Added in v2.2 Builds >=181117 + LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go new file mode 100644 index 00000000000..b4f9c315b05 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CloseHandle struct { + Handle string `json:"Handle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go new file mode 100644 index 00000000000..8bf8cab60e5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. +type ComPort struct { + NamedPipe string `json:"NamedPipe,omitempty"` + + OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go new file mode 100644 index 00000000000..10cea67e042 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ComputeSystem struct { + Owner string `json:"Owner,omitempty"` + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + HostedSystem interface{} `json:"HostedSystem,omitempty"` + + Container *Container `json:"Container,omitempty"` + + VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` + + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go new file mode 100644 index 00000000000..1d5dfe68ad6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go @@ -0,0 +1,72 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "net/http" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are strings, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes an oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request.
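+ // Illustrative sketch only (not part of the generated file): these keys are
+ // attached to a request context with the standard library's context package,
+ // and a swagger-generated client would read them back when building the HTTP
+ // request, e.g.
+ //
+ //   ctx := context.WithValue(context.Background(),
+ //       ContextBasicAuth, BasicAuth{UserName: "user", Password: "pass"})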
+ ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKey takes an APIKey as authentication for the request + ContextAPIKey = contextKey("apikey") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +type Configuration struct { + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "https://localhost", + DefaultHeader: make(map[string]string), + UserAgent: "Swagger-Codegen/2.1.0/go", + } + return cfg +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go new file mode 100644 index 00000000000..68aa04a573e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ConsoleSize struct { + Height int32 `json:"Height,omitempty"` + + Width int32 `json:"Width,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go new file mode 100644 index 00000000000..39a54432c02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Container struct { + GuestOs *GuestOs `json:"GuestOs,omitempty"` + + Storage *Storage `json:"Storage,omitempty"` + + MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` + + MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` + + Memory *Memory `json:"Memory,omitempty"` + + Processor *Processor `json:"Processor,omitempty"` + + Networking *Networking `json:"Networking,omitempty"` + + HvSocket *HvSocket `json:"HvSocket,omitempty"` + + ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go new file mode 100644 index 
00000000000..495c6ebc8f4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardAddInstanceRequest struct { + Id string `json:"Id,omitempty"` + CredentialSpec string `json:"CredentialSpec,omitempty"` + Transport string `json:"Transport,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go new file mode 100644 index 00000000000..1ed4c008f25 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardHvSocketServiceConfig struct { + ServiceId string `json:"ServiceId,omitempty"` + ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go new file mode 100644 index 00000000000..d7ebd0fcca1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardInstance struct { + Id string `json:"Id,omitempty"` + CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` + HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go new file mode 100644 index 00000000000..71005b090be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardModifyOperation string + +const ( + AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" + RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go new file mode 100644 index 00000000000..952cda4965c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardOperationRequest struct { + Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go new file mode 100644 index 00000000000..32e5a3beed1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardRemoveInstanceRequest struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go new file mode 100644 index 00000000000..0f8f644379c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardState struct { + + // Authentication cookie for calls to a Container Credential Guard instance. + Cookie string `json:"Cookie,omitempty"` + + // Name of the RPC endpoint of the Container Credential Guard instance. + RpcEndpoint string `json:"RpcEndpoint,omitempty"` + + // Transport used for the configured Container Credential Guard instance. + Transport string `json:"Transport,omitempty"` + + // Credential spec used for the configured Container Credential Guard instance. 
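+ // Illustrative sketch only (not part of the generated file): the
+ // credential-guard request types defined above compose into a single modify
+ // request, with the operation-specific payload carried in OperationDetails
+ // (all values here are hypothetical):
+ //
+ //   req := ContainerCredentialGuardOperationRequest{
+ //       Operation: AddInstance,
+ //       OperationDetails: ContainerCredentialGuardAddInstanceRequest{
+ //           Id:             "instance-0", // hypothetical instance ID
+ //           CredentialSpec: "{...}",      // JSON credential spec document
+ //           Transport:      "LRPC",       // assumed transport name
+ //       },
+ //   }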
+ CredentialSpec string `json:"CredentialSpec,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go new file mode 100644 index 00000000000..ea306fa21ac --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardSystemInfo struct { + Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go new file mode 100644 index 00000000000..1fd7ca5d56f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// memory usage as viewed from within the container +type ContainerMemoryInformation struct { + TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` + + TotalUsage int32 `json:"TotalUsage,omitempty"` + + CommittedBytes int32 `json:"CommittedBytes,omitempty"` + + SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` + + CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` + + PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go new file mode 100644 index 00000000000..90332a5190f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines +type CpuGroup struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go new file mode 100644 index 00000000000..8794961bf5c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupAffinity struct { + LogicalProcessorCount int32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []int32 
`json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go new file mode 100644 index 00000000000..0be0475d41a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupConfig struct { + GroupId string `json:"GroupId,omitempty"` + Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` + GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` + // Hypervisor CPU group IDs exposed to clients + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go new file mode 100644 index 00000000000..3ace0ccc3b8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to return cpu groups for a Service property query +type CpuGroupConfigurations struct { + CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go new file mode 100644 index 00000000000..7d897807016 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupOperation string + +const ( + CreateGroup CPUGroupOperation = "CreateGroup" + DeleteGroup CPUGroupOperation = "DeleteGroup" + SetProperty CPUGroupOperation = "SetProperty" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go new file mode 100644 index 00000000000..bbad6a2c450 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupProperty struct { + PropertyCode uint32 `json:"PropertyCode,omitempty"` + PropertyValue uint32 `json:"PropertyValue,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go 
new file mode 100644 index 00000000000..91a8278fe3c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Create group operation settings +type CreateGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go new file mode 100644 index 00000000000..134bd988175 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Delete group operation settings +type DeleteGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go new file mode 100644 index 00000000000..31c4538affc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceType string + +const ( + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" +) + +type Device struct { + // The type of device to assign to the container. + Type DeviceType `json:"Type,omitempty"` + // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. + InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
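+ // Illustrative sketch only (not part of the generated file): per the comments
+ // above, each Type reads a different selector field (values are made up):
+ //
+ //   byClass := Device{Type: ClassGUID, InterfaceClassGuid: "00000000-0000-0000-0000-000000000000"}
+ //   byPath  := Device{Type: DeviceInstanceID, LocationPath: "PCIROOT(0)#PCI(0300)"}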
+ LocationPath string `json:"LocationPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go new file mode 100644 index 00000000000..e985d96d228 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Devices struct { + ComPorts map[string]ComPort `json:"ComPorts,omitempty"` + + Scsi map[string]Scsi `json:"Scsi,omitempty"` + + VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` + + NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` + + VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` + + Keyboard *Keyboard `json:"Keyboard,omitempty"` + + Mouse *Mouse `json:"Mouse,omitempty"` + + HvSocket *HvSocket2 `json:"HvSocket,omitempty"` + + EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` + + GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` + + VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` + + Plan9 *Plan9 `json:"Plan9,omitempty"` + + Battery *Battery `json:"Battery,omitempty"` + + FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` + + SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` + + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + VirtualPci map[string]VirtualPciDevice `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go new file mode 100644 index 00000000000..85450c41e10 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type EnhancedModeVideo struct { + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go new file mode 100644 index 00000000000..fe86cab6556 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type FlexibleIoDevice struct { + EmulatorId string `json:"EmulatorId,omitempty"` + + HostingModel string `json:"HostingModel,omitempty"` + + Configuration []string `json:"Configuration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go new file mode 100644 index 
00000000000..7db29495b3e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestConnection struct { + + // Use Vsock rather than Hyper-V sockets to communicate with the guest service. + UseVsock bool `json:"UseVsock,omitempty"` + + // Don't disconnect the guest connection when pausing the virtual machine. + UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go new file mode 100644 index 00000000000..8a369bab71c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Information about the guest. +type GuestConnectionInfo struct { + + // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. + SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` + + ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` + + GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go new file mode 100644 index 00000000000..af828004835 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestCrashReporting struct { + WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go new file mode 100644 index 00000000000..8838519a39c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestOs struct { + HostName string `json:"HostName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go new file mode 100644 index 00000000000..ef1eec88656 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go @@ -0,0 
+1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestState struct { + + // The path to an existing file used for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. + GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. + RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` + + // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. + ForceTransientState bool `json:"ForceTransientState,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go new file mode 100644 index 00000000000..2238ce5306c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to request a service processor modification +type HostProcessorModificationRequest struct { + Operation CPUGroupOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go new file mode 100644 index 00000000000..ea3084bca7f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HostedSystem struct { + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + Container *Container `json:"Container,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go new file mode 100644 index 00000000000..23b2ee9e7d4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocket struct { + Config *HvSocketSystemConfig `json:"Config,omitempty"` + + EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go new file mode 100644 index 00000000000..a017691f02d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// HvSocket configuration for a VM +type HvSocket2 struct { + HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go new file mode 100644 index 00000000000..84c11b93ee5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. +type HvSocketAddress struct { + LocalAddress string `json:"LocalAddress,omitempty"` + ParentAddress string `json:"ParentAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go new file mode 100644 index 00000000000..ecd9f7fbac2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go @@ -0,0 +1,28 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocketServiceConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` + + // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors + AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` + + // Disabled controls whether the HvSocket service is accepting connection requests. + // Setting this to true will make the service refuse all incoming connections as well as cancel + // any connections already established. The service itself will still be active, however, + // and can be re-enabled at a future time.
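+ // Illustrative sketch only (not part of the generated file): a per-service
+ // entry typically lives in the ServiceTable of HvSocketSystemConfig below,
+ // keyed by a service GUID (the GUID and SDDL strings here are placeholders):
+ //
+ //   cfg := HvSocketSystemConfig{
+ //       ServiceTable: map[string]HvSocketServiceConfig{
+ //           "00000000-0000-0000-0000-000000000000": {
+ //               BindSecurityDescriptor:    "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
+ //               ConnectSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
+ //           },
+ //       },
+ //   }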
+ Disabled bool `json:"Disabled,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go new file mode 100644 index 00000000000..69f4f9d39b9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +type HvSocketSystemConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). + DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. + DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` + + ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go new file mode 100644 index 00000000000..a614d63bd72 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go @@ -0,0 +1,42 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterruptModerationName string + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
+const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +type InterruptModerationValue uint32 + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go new file mode 100644 index 00000000000..2a55cc37cd3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IovSettings struct { + // The weight assigned to this port for I/O virtualization (IOV) offloading. + // Setting this to 0 disables IOV offloading. + OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` + + // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. + QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` + + // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
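+ // Illustrative sketch only (not part of the generated file): because these
+ // fields are pointers, values must be taken by address, e.g. enabling offload
+ // with adaptive moderation (the weight value is assumed):
+ //
+ //   weight := uint32(100)
+ //   mode := AdaptiveName
+ //   iov := IovSettings{OffloadWeight: &weight, InterruptModeration: &mode}
+ //
+ // while a weight of 0 disables IOV offloading, per the field comment above.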
+ InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go new file mode 100644 index 00000000000..3d3fa3b1c73 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Keyboard struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go new file mode 100644 index 00000000000..176c49d4959 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Layer struct { + Id string `json:"Id,omitempty"` + + Path string `json:"Path,omitempty"` + + PathType string `json:"PathType,omitempty"` + + // Unspecified defaults to Enabled + Cache string `json:"Cache,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go new file mode 100644 index 00000000000..0ab6c280fc8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.2 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LinuxKernelDirect struct { + KernelFilePath string `json:"KernelFilePath,omitempty"` + + InitRdPath string `json:"InitRdPath,omitempty"` + + KernelCmdLine string `json:"KernelCmdLine,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go new file mode 100644 index 00000000000..2e3aa5e1750 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LogicalProcessor struct { + LpIndex uint32 `json:"LpIndex,omitempty"` + NodeNumber uint8 `json:"NodeNumber,omitempty"` + PackageId uint32 `json:"PackageId,omitempty"` + CoreId uint32 `json:"CoreId,omitempty"` + RootVpIndex int32 `json:"RootVpIndex,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go new file mode 100644 index 00000000000..9b86a40457f --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedDirectory struct { + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` + + ContainerPath string `json:"ContainerPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go new file mode 100644 index 00000000000..208074e9a25 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedPipe struct { + ContainerPipeName string `json:"ContainerPipeName,omitempty"` + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go new file mode 100644 index 00000000000..30749c67249 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go new file mode 100644 index 00000000000..71224c75b9d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go @@ -0,0 +1,49 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. + EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` + + // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed + // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by + // the guest operating system). + EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + + // LowMmioGapInMB is the low MMIO region allocated below 4GB. + // + // TODO: This is pre-release support in schema 2.3. 
Need to add build number + // docs when a public build with this is out. + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + + // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and + // size). + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + + // HighMmioGapInMB is the high MMIO region. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go new file mode 100644 index 00000000000..811779b04b2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MemoryInformationForVm struct { + VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` + + VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` + + VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go new file mode 100644 index 00000000000..906ba597f9f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Memory runtime statistics +type MemoryStats struct { + MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + + MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + + MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 00000000000..8dbe40b3be2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go new file mode 100644 index 00000000000..8fe89f92747 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 00000000000..a62568d892e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 00000000000..a7410febd6d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 00000000000..3553640647e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 00000000000..7be98b54107 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + 
* API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 00000000000..3ab9cf1ecf0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 00000000000..d2f51b3b53c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 00000000000..47dfb55bfa8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 00000000000..8867ebe5f02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone 
string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go new file mode 100644 index 00000000000..1384ed88821 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModificationRequest struct { + PropertyType PropertyType `json:"PropertyType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go new file mode 100644 index 00000000000..d29455a3e43 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModifySettingRequest struct { + ResourcePath string `json:"ResourcePath,omitempty"` + + RequestType string `json:"RequestType,omitempty"` + + Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated + + GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go new file mode 100644 index 00000000000..ccf8b938f3a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Mouse struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go new file mode 100644 index 00000000000..7408abd317d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NetworkAdapter struct { + EndpointId string `json:"EndpointId,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + // The I/O virtualization (IOV) offloading configuration. 
+ IovSettings *IovSettings `json:"IovSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go new file mode 100644 index 00000000000..e5ea187a295 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Networking struct { + AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` + + DnsSearchList string `json:"DnsSearchList,omitempty"` + + NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` + + // Guid in windows; string in linux + Namespace string `json:"Namespace,omitempty"` + + NetworkAdapters []string `json:"NetworkAdapters,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go new file mode 100644 index 00000000000..d96c9501f33 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Notification data that is indicated to components running in the Virtual Machine. +type PauseNotification struct { + Reason string `json:"Reason,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go new file mode 100644 index 00000000000..21707a88eb7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Options for HcsPauseComputeSystem +type PauseOptions struct { + SuspensionLevel string `json:"SuspensionLevel,omitempty"` + + HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go new file mode 100644 index 00000000000..29d8c8012ff --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9 struct { + Shares []Plan9Share `json:"Shares,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go new file mode 100644 index 00000000000..41f8fdea029 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go @@ -0,0 +1,34 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9Share struct { + Name string `json:"Name,omitempty"` + + // The name by which the guest operating system can access this share, via the aname parameter in the Plan9 protocol. + AccessName string `json:"AccessName,omitempty"` + + Path string `json:"Path,omitempty"` + + Port int32 `json:"Port,omitempty"` + + // Flags are marked private. Until they are exported correctly, the values are: + // + // ReadOnly 0x00000001 + // LinuxMetadata 0x00000004 + // CaseSensitive 0x00000008 + Flags int32 `json:"Flags,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go new file mode 100644 index 00000000000..e9a662dd59d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go @@ -0,0 +1,33 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Information about a process running in a container +type ProcessDetails struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + ImageName string `json:"ImageName,omitempty"` + + CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` + + UserTime100ns int32 `json:"UserTime100ns,omitempty"` + + KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` + + MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` + + MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` + + MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go new file mode 100644 index 00000000000..e4ed095c7be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Passed to HcsRpc_ModifyProcess +type ProcessModifyRequest struct { + Operation string `json:"Operation,omitempty"` + + ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` + + CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go new file mode 100644 index 00000000000..82b0d0532b2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No
description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessParameters struct { + ApplicationName string `json:"ApplicationName,omitempty"` + + CommandLine string `json:"CommandLine,omitempty"` + + // optional alternative to CommandLine, currently only supported by Linux GCS + CommandArgs []string `json:"CommandArgs,omitempty"` + + User string `json:"User,omitempty"` + + WorkingDirectory string `json:"WorkingDirectory,omitempty"` + + Environment map[string]string `json:"Environment,omitempty"` + + // if set, will run as low-privilege process + RestrictedToken bool `json:"RestrictedToken,omitempty"` + + // if set, ignore StdErrPipe + EmulateConsole bool `json:"EmulateConsole,omitempty"` + + CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` + + CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` + + CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` + + // height then width + ConsoleSize []int32 `json:"ConsoleSize,omitempty"` + + // if set, find an existing session for the user and create the process in it + UseExistingLogin bool `json:"UseExistingLogin,omitempty"` + + // if set, use the legacy console instead of conhost + UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go new file mode 100644 index 00000000000..ad9a4fa9ad6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Status of a process running in a container +type ProcessStatus struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + Exited bool `json:"Exited,omitempty"` + + ExitCode int32 `json:"ExitCode,omitempty"` + + LastWaitResult int32 `json:"LastWaitResult,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go new file mode 100644 index 00000000000..bb24e88da1a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor struct { + Count int32 `json:"Count,omitempty"` + + Maximum int32 `json:"Maximum,omitempty"` + + Weight int32 `json:"Weight,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go new file mode 100644 index 00000000000..c64f335ec7d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.5 + * Generated by: 
Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor2 struct { + Count int32 `json:"Count,omitempty"` + + Limit int32 `json:"Limit,omitempty"` + + Weight int32 `json:"Weight,omitempty"` + + ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` + + // An optional object that configures the CPU Group to which a Virtual Machine is going to bind. + CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go new file mode 100644 index 00000000000..6157e252256 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU runtime statistics +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` + + RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` + + RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go new file mode 100644 index 00000000000..885156e77fa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessorTopology struct { + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go new file mode 100644 index 00000000000..17558cba0f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -0,0 +1,54 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + v1 "github.com/containerd/cgroups/stats/v1" +) + +type Properties struct { + Id string `json:"Id,omitempty"` + + SystemType string `json:"SystemType,omitempty"` + + RuntimeOsType string `json:"RuntimeOsType,omitempty"` + + Name string `json:"Name,omitempty"` + + Owner string `json:"Owner,omitempty"` + + RuntimeId string `json:"RuntimeId,omitempty"` + + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` + + State string `json:"State,omitempty"` + + Stopped bool `json:"Stopped,omitempty"` + + ExitType string `json:"ExitType,omitempty"` + + Memory *MemoryInformationForVm `json:"Memory,omitempty"` + + Statistics *Statistics `json:"Statistics,omitempty"` + + ProcessList []ProcessDetails `json:"ProcessList,omitempty"` + + TerminateOnLastHandleClosed bool
`json:"TerminateOnLastHandleClosed,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` + + GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` + + // Metrics is not part of the API for HCS but this is used for LCOW v2 to + // return the full cgroup metrics from the guest. + Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go new file mode 100644 index 00000000000..d6d80df1314 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// By default the basic properties will be returned. This query provides a way to request specific properties. +type PropertyQuery struct { + PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go new file mode 100644 index 00000000000..98f2c96edbd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type PropertyType string + +const ( + PTMemory PropertyType = "Memory" + PTGuestMemory PropertyType = "GuestMemory" + PTStatistics PropertyType = "Statistics" + PTProcessList PropertyType = "ProcessList" + PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" + PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" + PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
+ PTGuestConnection PropertyType = "GuestConnection" + PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" + PTProcessorTopology PropertyType = "ProcessorTopology" + PTCPUGroup PropertyType = "CpuGroup" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go new file mode 100644 index 00000000000..8d5f5c1719e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RdpConnectionOptions struct { + AccessSids []string `json:"AccessSids,omitempty"` + + NamedPipe string `json:"NamedPipe,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go new file mode 100644 index 00000000000..006906f6e2f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryChanges struct { + AddValues []RegistryValue `json:"AddValues,omitempty"` + + DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go new file mode 100644 index 00000000000..26fde99c74c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryKey struct { + Hive string `json:"Hive,omitempty"` + + Name string `json:"Name,omitempty"` + + Volatile bool `json:"Volatile,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go new file mode 100644 index 00000000000..3f203176c32 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -0,0 +1,30 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryValue struct { + Key *RegistryKey `json:"Key,omitempty"` + + Name string `json:"Name,omitempty"` + + Type_ string `json:"Type,omitempty"` + + // One and only one value type must be set. 
+ StringValue string `json:"StringValue,omitempty"` + + BinaryValue string `json:"BinaryValue,omitempty"` + + DWordValue int32 `json:"DWordValue,omitempty"` + + QWordValue int32 `json:"QWordValue,omitempty"` + + // Only used if RegistryValueType is CustomType. The data is in BinaryValue. + CustomType int32 `json:"CustomType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go new file mode 100644 index 00000000000..778ff58735a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RestoreState struct { + + // The path to the save state file to restore the system from. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` + + // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. + TemplateSystemId string `json:"TemplateSystemId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go new file mode 100644 index 00000000000..e55fa1d98a5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SaveOptions struct { + + // The type of save operation to be performed. + SaveType string `json:"SaveType,omitempty"` + + // The path to the file that will contain the saved state. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go new file mode 100644 index 00000000000..bf253a470b6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Scsi struct { + + // Map of attachments, where the key is the integer LUN number on the controller.
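+ // For example (an illustrative sketch, assuming the generated Attachment
+ // type's Type_ and Path fields), a VHD attached at LUN 0 could be expressed
+ // as Attachments: map[string]Attachment{"0": {Type_: "VirtualDisk", Path: "C:\\scratch.vhdx"}}.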
+ Attachments map[string]Attachment `json:"Attachments,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go new file mode 100644 index 00000000000..b8142ca6a61 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "encoding/json" + +type ServiceProperties struct { + // Changed Properties field to []json.RawMessage from []interface{} to avoid having to + // remarshal sp.Properties[n] and unmarshal into the type(s) we want. + Properties []json.RawMessage `json:"Properties,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go new file mode 100644 index 00000000000..df9baa9219a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryConfiguration struct { + Regions []SharedMemoryRegion `json:"Regions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go new file mode 100644 index 00000000000..825b71865d7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegion struct { + SectionName string `json:"SectionName,omitempty"` + + StartOffset int32 `json:"StartOffset,omitempty"` + + Length int32 `json:"Length,omitempty"` + + AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` + + HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go new file mode 100644 index 00000000000..f67b08eb57a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegionInfo struct { + SectionName string `json:"SectionName,omitempty"` + + GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go new file mode 100644 index 00000000000..5eaf6a7f4a2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Silo job information +type SiloProperties struct { + Enabled bool `json:"Enabled,omitempty"` + + JobName string `json:"JobName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go new file mode 100644 index 00000000000..ba7a6b3963b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go @@ -0,0 +1,29 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Runtime statistics for a container +type Statistics struct { + Timestamp time.Time `json:"Timestamp,omitempty"` + + ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` + + Uptime100ns uint64 `json:"Uptime100ns,omitempty"` + + Processor *ProcessorStats `json:"Processor,omitempty"` + + Memory *MemoryStats `json:"Memory,omitempty"` + + Storage *StorageStats `json:"Storage,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go new file mode 100644 index 00000000000..2627af91323 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Storage struct { + + // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. + Layers []Layer `json:"Layers,omitempty"` + + // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. 
+ Path string `json:"Path,omitempty"` + + QoS *StorageQoS `json:"QoS,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go new file mode 100644 index 00000000000..9c5e6eb5323 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type StorageQoS struct { + IopsMaximum int32 `json:"IopsMaximum,omitempty"` + + BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go new file mode 100644 index 00000000000..4f042ffd937 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Storage runtime statistics +type StorageStats struct { + ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` + + ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` + + WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` + + WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go new file mode 100644 index 00000000000..83486994036 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Topology struct { + Memory *Memory2 `json:"Memory,omitempty"` + + Processor *Processor2 `json:"Processor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go new file mode 100644 index 00000000000..0e48ece500c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Uefi struct { + EnableDebugger bool `json:"EnableDebugger,omitempty"` + + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` + + BootThis *UefiBootEntry `json:"BootThis,omitempty"` + + Console string `json:"Console,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go new file mode 100644 index 00000000000..3ab409d825e --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type UefiBootEntry struct { + DeviceType string `json:"DeviceType,omitempty"` + + DevicePath string `json:"DevicePath,omitempty"` + + DiskNumber int32 `json:"DiskNumber,omitempty"` + + OptionalData string `json:"OptionalData,omitempty"` + + VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go new file mode 100644 index 00000000000..2abfccca315 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Version struct { + Major int32 `json:"Major,omitempty"` + + Minor int32 `json:"Minor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go new file mode 100644 index 00000000000..ec5d0fb936d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VideoMonitor struct { + HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` + + VerticalResolution int32 `json:"VerticalResolution,omitempty"` + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go new file mode 100644 index 00000000000..2d22b1bcb08 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -0,0 +1,32 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachine struct { + + // StopOnReset is private in the schema. If regenerated need to put back. 
+ StopOnReset bool `json:"StopOnReset,omitempty"` + + Chipset *Chipset `json:"Chipset,omitempty"` + + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + + Devices *Devices `json:"Devices,omitempty"` + + GuestState *GuestState `json:"GuestState,omitempty"` + + RestoreState *RestoreState `json:"RestoreState,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go new file mode 100644 index 00000000000..91a3c83d4ff --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualNodeInfo struct { + VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` + + PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` + + VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` + + MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go new file mode 100644 index 00000000000..f5b7f3e38c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemController struct { + Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` + + MaximumCount uint32 `json:"MaximumCount,omitempty"` + + MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` + + Backing string `json:"Backing,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go new file mode 100644 index 00000000000..70cf2d90de0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemDevice struct { + HostPath string `json:"HostPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 00000000000..9ef322f615b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + 
* + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go new file mode 100644 index 00000000000..f5e05903c54 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciDevice struct { + Functions []VirtualPciFunction `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go new file mode 100644 index 00000000000..cedb7d18bc2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. 
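+// As an illustrative sketch (not part of the schema), assigning one SR-IOV
+// virtual function to a VM could be described with VirtualPciDevice above and
+// VirtualPciFunction below:
+//
+//	dev := VirtualPciDevice{Functions: []VirtualPciFunction{{
+//		DeviceInstancePath: "<device instance path>", // hypothetical placeholder
+//		VirtualFunction:    0,
+//	}}}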
+type VirtualPciFunction struct { + DeviceInstancePath string `json:",omitempty"` + + VirtualFunction uint16 `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go new file mode 100644 index 00000000000..362df363e13 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmb struct { + Shares []VirtualSmbShare `json:"Shares,omitempty"` + + DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go new file mode 100644 index 00000000000..915e9b6386a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShare struct { + Name string `json:"Name,omitempty"` + + Path string `json:"Path,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` + + Options *VirtualSmbShareOptions `json:"Options,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go new file mode 100644 index 00000000000..75196bd8c8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go @@ -0,0 +1,62 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShareOptions struct { + ReadOnly bool `json:"ReadOnly,omitempty"` + + // convert exclusive access to shared read access + ShareRead bool `json:"ShareRead,omitempty"` + + // all opens will use cached I/O + CacheIo bool `json:"CacheIo,omitempty"` + + // disable oplock support + NoOplocks bool `json:"NoOplocks,omitempty"` + + // Acquire the backup privilege when attempting to open + TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` + + // Use the identity of the share root when opening + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + // disable Direct Mapping + NoDirectmap bool `json:"NoDirectmap,omitempty"` + + // disable byte-range locks + NoLocks bool `json:"NoLocks,omitempty"` + + // disable directory change notifications + NoDirnotify bool `json:"NoDirnotify,omitempty"` + + // share is used for VM shared memory + VmSharedMemory bool `json:"VmSharedMemory,omitempty"` + + // allow access only to the files specified in AllowedFiles + RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` + + // disable all oplocks except Level II + ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` +
+ // Allow the host to reparse this base layer + ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` + + // Enable pseudo-oplocks + PseudoOplocks bool `json:"PseudoOplocks,omitempty"` + + // All opens will use non-cached IO + NonCacheIo bool `json:"NonCacheIo,omitempty"` + + // Enable pseudo directory change notifications + PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` + + // Block directory enumeration, renames, and deletes. + SingleFileMapping bool `json:"SingleFileMapping,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go new file mode 100644 index 00000000000..8e1836dd6be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VmMemory struct { + AvailableMemory int32 `json:"AvailableMemory,omitempty"` + + AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` + + ReservedMemory uint64 `json:"ReservedMemory,omitempty"` + + AssignedMemory uint64 `json:"AssignedMemory,omitempty"` + + SlpActive bool `json:"SlpActive,omitempty"` + + BalancingEnabled bool `json:"BalancingEnabled,omitempty"` + + DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go new file mode 100644 index 00000000000..de1b9cf1ae2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. +type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 `json:"Limit,omitempty"` + // Value describing the relative priority of this virtual machine compared to other virtual machines. + Weight uint64 `json:"Weight,omitempty"` + // Minimum amount of host CPU resources that the virtual machine is guaranteed. + Reservation uint64 `json:"Reservation,omitempty"` + // Provides the target maximum CPU frequency, in MHz, for a virtual machine. 
+ MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go new file mode 100644 index 00000000000..8ed7e566d64 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type WindowsCrashReporting struct { + DumpFileName string `json:"DumpFileName,omitempty"` + + MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go new file mode 100644 index 00000000000..a634dfc1515 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -0,0 +1,49 @@ +package hcs + +import ( + "context" + "encoding/json" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vmcompute" +) + +// GetServiceProperties returns properties of the host compute service. +func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { + operation := "hcs::GetServiceProperties" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.ServiceProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, err + } + return properties, nil +} + +// ModifyServiceSettings modifies settings of the host compute service. 
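Both service-level helpers here, GetServiceProperties above and ModifyServiceSettings just below, share one shape: marshal a typed request to JSON, call the vmcompute binding, and turn resultJSON into events for error reporting. A minimal caller sketch, assuming the zero-value hcsschema.PropertyQuery is an acceptable "return everything" query and that the caller lives inside the hcsshim module (these are internal packages); context, fmt, and the hcsschema import are assumed:

    // exampleServiceQuery is illustrative only, not part of the patch.
    func exampleServiceQuery(ctx context.Context) error {
        props, err := GetServiceProperties(ctx, hcsschema.PropertyQuery{})
        if err != nil {
            return err
        }
        fmt.Printf("HCS service properties: %+v\n", props)
        return nil
    }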
+func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { + operation := "hcs::ModifyServiceSettings" + + settingsJSON, err := json.Marshal(settings) + if err != nil { + return err + } + resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return &HcsError{Op: operation, Err: err, Events: events} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go new file mode 100644 index 00000000000..75499c967f0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -0,0 +1,637 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "strings" + "sync" + "syscall" + + "github.com/Microsoft/hcsshim/internal/cow" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "go.opencensus.io/trace" +) + +type System struct { + handleLock sync.RWMutex + handle vmcompute.HcsSystem + id string + callbackNumber uintptr + + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error + exitError error + os, typ string +} + +func newSystem(id string) *System { + return &System{ + id: id, + waitBlock: make(chan struct{}), + } +} + +// CreateComputeSystem creates a new compute system with the given configuration but does not start it. +func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcs::CreateComputeSystem" + + // hcsCreateComputeSystemContext is an async operation. Start the outer span + // here to measure the full create time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", id)) + + computeSystem := newSystem(id) + + hcsDocumentB, err := json.Marshal(hcsDocumentInterface) + if err != nil { + return nil, err + } + + hcsDocument := string(hcsDocumentB) + + var ( + identity syscall.Handle + resultJSON string + createError error + ) + computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) + if createError == nil || IsPending(createError) { + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + return nil, makeSystemError(computeSystem, operation, err, nil) + } + } + + events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + if err != nil { + if err == ErrTimeout { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + } + return nil, makeSystemError(computeSystem, operation, err, events) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +// OpenComputeSystem opens an existing compute system by ID. 
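A hedged usage sketch for CreateComputeSystem above (OpenComputeSystem, documented next, follows the same open/register-callback/wait shape). The document literal is a placeholder, not a valid HCS schema document:

    // exampleCreate is illustrative only, not part of the patch.
    func exampleCreate(ctx context.Context) error {
        doc := map[string]interface{}{
            "SchemaVersion": map[string]int{"Major": 2, "Minor": 1}, // placeholder, not a real document
        }
        system, err := CreateComputeSystem(ctx, "example-cid", doc)
        if err != nil {
            return err
        }
        // Close releases the handle and callback; it does not stop the system.
        defer system.Close()
        return system.Start(ctx)
    }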
+func OpenComputeSystem(ctx context.Context, id string) (*System, error) { + operation := "hcs::OpenComputeSystem" + + computeSystem := newSystem(id) + handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + computeSystem.handle = handle + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +func (computeSystem *System) getCachedProperties(ctx context.Context) error { + props, err := computeSystem.Properties(ctx) + if err != nil { + return err + } + computeSystem.typ = strings.ToLower(props.SystemType) + computeSystem.os = strings.ToLower(props.RuntimeOSType) + if computeSystem.os == "" && computeSystem.typ == "container" { + // Pre-RS5 HCS did not return the OS, but it only supported containers + // that ran Windows. + computeSystem.os = "windows" + } + return nil +} + +// OS returns the operating system of the compute system, "linux" or "windows". +func (computeSystem *System) OS() string { + return computeSystem.os +} + +// IsOCI returns whether processes in the compute system should be created via +// OCI. +func (computeSystem *System) IsOCI() bool { + return computeSystem.os == "linux" && computeSystem.typ == "container" +} + +// GetComputeSystems gets a list of the compute systems on the system that match the query +func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { + operation := "hcs::GetComputeSystems" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + + computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if computeSystemsJSON == "" { + return nil, ErrUnexpectedValue + } + computeSystems := []schema1.ContainerProperties{} + if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { + return nil, err + } + + return computeSystems, nil +} + +// Start synchronously starts the computeSystem. +func (computeSystem *System) Start(ctx context.Context) (err error) { + operation := "hcs::System::Start" + + // hcsStartComputeSystemContext is an async operation. Start the outer span + // here to measure the full start time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// ID returns the compute system's identifier. 
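GetComputeSystems above pairs with the v1 schema query type. A sketch of enumerating systems, assuming the zero value of schema1.ComputeSystemQuery matches all systems (fmt and the schema1 import assumed):

    // listSystems is illustrative only, not part of the patch.
    func listSystems(ctx context.Context) error {
        systems, err := GetComputeSystems(ctx, schema1.ComputeSystemQuery{})
        if err != nil {
            return err
        }
        for _, s := range systems {
            fmt.Printf("compute system: %+v\n", s)
        }
        return nil
    }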
+func (computeSystem *System) ID() string { + return computeSystem.id +} + +// Shutdown requests a compute system shutdown. +func (computeSystem *System) Shutdown(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Shutdown" + + if computeSystem.handle == 0 { + return nil + } + + resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// Terminate requests a compute system terminate. +func (computeSystem *System) Terminate(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Terminate" + + if computeSystem.handle == 0 { + return nil + } + + resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// waitBackground waits for the compute system exit notification. Once received +// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `computeSystem.handle` but `Wait` is +// safe to call multiple times. +func (computeSystem *System) waitBackground() { + operation := "hcs::System::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + switch err { + case nil: + log.G(ctx).Debug("system exited") + case ErrVmcomputeUnexpectedExit: + log.G(ctx).Debug("unexpected system exit") + computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) + err = nil + default: + err = makeSystemError(computeSystem, operation, err, nil) + } + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = err + close(computeSystem.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +// Wait synchronously waits for the compute system to shutdown or terminate. If +// the compute system has already exited returns the previous error (if any). +func (computeSystem *System) Wait() error { + <-computeSystem.waitBlock + return computeSystem.waitError +} + +// ExitError returns an error describing the reason the compute system terminated. +func (computeSystem *System) ExitError() error { + select { + case <-computeSystem.waitBlock: + if computeSystem.waitError != nil { + return computeSystem.waitError + } + return computeSystem.exitError + default: + return errors.New("container not exited") + } +} + +// Properties returns the requested container properties targeting a V1 schema container. 
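Because Wait blocks until waitBackground observes the exit notification, callers that need a bounded stop usually race it against a timer. A sketch of that pattern using Shutdown, Wait, and Terminate above (the 30-second budget is an arbitrary choice; the time import is assumed):

    // stopWithTimeout is illustrative only, not part of the patch.
    func stopWithTimeout(ctx context.Context, system *System) error {
        if err := system.Shutdown(ctx); err != nil {
            return err
        }
        done := make(chan error, 1)
        go func() { done <- system.Wait() }()
        select {
        case err := <-done:
            return err
        case <-time.After(30 * time.Second):
            // Graceful shutdown stalled; fall back to a hard stop.
            return system.Terminate(ctx)
        }
    }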
+func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	operation := "hcs::System::Properties"
+
+	queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types})
+	if err != nil {
+		return nil, makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return nil, makeSystemError(computeSystem, operation, err, events)
+	}
+
+	if propertiesJSON == "" {
+		return nil, ErrUnexpectedValue
+	}
+	properties := &schema1.ContainerProperties{}
+	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+		return nil, makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	return properties, nil
+}
+
+// PropertiesV2 returns the requested container properties targeting a V2 schema container.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	operation := "hcs::System::PropertiesV2"
+
+	queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
+	if err != nil {
+		return nil, makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return nil, makeSystemError(computeSystem, operation, err, events)
+	}
+
+	if propertiesJSON == "" {
+		return nil, ErrUnexpectedValue
+	}
+	properties := &hcsschema.Properties{}
+	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+		return nil, makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	return properties, nil
+}
+
+// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
+func (computeSystem *System) Pause(ctx context.Context) (err error) {
+	operation := "hcs::System::Pause"
+
+	// hcsPauseComputeSystemContext is an async operation. Start the outer span
+	// here to measure the full pause time.
+	ctx, span := trace.StartSpan(ctx, operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
+
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	if computeSystem.handle == 0 {
+		return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
+	}
+
+	resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "")
+	events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
+	if err != nil {
+		return makeSystemError(computeSystem, operation, err, events)
+	}
+
+	return nil
+}
+
+// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
+func (computeSystem *System) Resume(ctx context.Context) (err error) {
+	operation := "hcs::System::Resume"
+
+	// hcsResumeComputeSystemContext is an async operation. Start the outer span
+	// here to measure the full restore time.
+	ctx, span := trace.StartSpan(ctx, operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
+
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	if computeSystem.handle == 0 {
+		return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
+	}
+
+	resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "")
+	events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
+	if err != nil {
+		return makeSystemError(computeSystem, operation, err, events)
+	}
+
+	return nil
+}
+
+// Save saves the compute system.
+func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) {
+	operation := "hcs::System::Save"
+
+	// hcsSaveComputeSystemContext is an async operation. Start the outer span
+	// here to measure the full save time.
+	ctx, span := trace.StartSpan(ctx, operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
+
+	saveOptions, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	if computeSystem.handle == 0 {
+		return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
+	}
+
+	result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions))
+	events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave)
+	if err != nil {
+		return makeSystemError(computeSystem, operation, err, events)
+	}
+
+	return nil
+}
+
+func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) {
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	if computeSystem.handle == 0 {
+		return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil)
+	}
+
+	configurationb, err := json.Marshal(c)
+	if err != nil {
+		return nil, nil, makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	configuration := string(configurationb)
+	processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return nil, nil, makeSystemError(computeSystem, operation, err, events)
+	}
+
+	log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid")
+	return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil
+}
+
+// CreateProcess launches a new process within the computeSystem.
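Pause, Resume, and Save all share the async-notification shape above. A checkpoint helper might look like this sketch; the options map is a stand-in for an HCS save-options document and its field name is an assumption, not taken from this patch:

    // checkpoint is illustrative only, not part of the patch.
    func checkpoint(ctx context.Context, system *System, path string) error {
        if err := system.Pause(ctx); err != nil {
            return err
        }
        // Resume even if the save fails, so the guest is not left frozen.
        defer func() { _ = system.Resume(ctx) }()
        opts := map[string]string{"SaveStateFilePath": path} // assumed field name
        return system.Save(ctx, opts)
    }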
+func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { + operation := "hcs::System::CreateProcess" + process, processInfo, err := computeSystem.createProcess(ctx, operation, c) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + process.Close() + } + }() + + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + process.stdin = pipes[0] + process.stdout = pipes[1] + process.stderr = pipes[2] + process.hasCachedStdio = true + + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// OpenProcess gets an interface to an existing process within the computeSystem. +func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::OpenProcess" + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + process := newProcess(processHandle, pid, computeSystem) + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// Close cleans up any state associated with the compute system but does not terminate or wait for it. 
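CreateProcess above hands the process back through the cow.Process interface after wiring the three stdio handles into Go files. A hedged launch-and-reap sketch; the configuration map is a placeholder rather than a validated process schema document:

    // runProcess is illustrative only, not part of the patch.
    func runProcess(ctx context.Context, system *System) error {
        cfg := map[string]interface{}{"CommandLine": "cmd /c echo hello"} // placeholder
        proc, err := system.CreateProcess(ctx, cfg)
        if err != nil {
            return err
        }
        defer proc.Close()
        return proc.Wait()
    }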
+func (computeSystem *System) Close() (err error) {
+	operation := "hcs::System::Close"
+	ctx, span := trace.StartSpan(context.Background(), operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
+
+	computeSystem.handleLock.Lock()
+	defer computeSystem.handleLock.Unlock()
+
+	// Don't double free this
+	if computeSystem.handle == 0 {
+		return nil
+	}
+
+	if err = computeSystem.unregisterCallback(ctx); err != nil {
+		return makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle)
+	if err != nil {
+		return makeSystemError(computeSystem, operation, err, nil)
+	}
+
+	computeSystem.handle = 0
+	computeSystem.closedWaitOnce.Do(func() {
+		computeSystem.waitError = ErrAlreadyClosed
+		close(computeSystem.waitBlock)
+	})
+
+	return nil
+}
+
+func (computeSystem *System) registerCallback(ctx context.Context) error {
+	callbackContext := &notificationWatcherContext{
+		channels: newSystemChannels(),
+		systemID: computeSystem.id,
+	}
+
+	callbackMapLock.Lock()
+	callbackNumber := nextCallback
+	nextCallback++
+	callbackMap[callbackNumber] = callbackContext
+	callbackMapLock.Unlock()
+
+	callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber)
+	if err != nil {
+		return err
+	}
+	callbackContext.handle = callbackHandle
+	computeSystem.callbackNumber = callbackNumber
+
+	return nil
+}
+
+func (computeSystem *System) unregisterCallback(ctx context.Context) error {
+	callbackNumber := computeSystem.callbackNumber
+
+	callbackMapLock.RLock()
+	callbackContext := callbackMap[callbackNumber]
+	callbackMapLock.RUnlock()
+
+	if callbackContext == nil {
+		return nil
+	}
+
+	handle := callbackContext.handle
+
+	if handle == 0 {
+		return nil
+	}
+
+	// hcsUnregisterComputeSystemCallback has its own synchronization
+	// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
+ err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} + +// Modify the System by sending a request to HCS +func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Modify" + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + requestBytes, err := json.Marshal(config) + if err != nil { + return err + } + + requestJSON := string(requestBytes) + resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go new file mode 100644 index 00000000000..3342e5bb948 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -0,0 +1,62 @@ +package hcs + +import ( + "context" + "io" + "syscall" + + "github.com/Microsoft/go-winio" + diskutil "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/computestorage" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// if there is an error. +func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { + fs := make([]io.ReadWriteCloser, len(hs)) + for i, h := range hs { + if h != syscall.Handle(0) { + if err == nil { + fs[i], err = winio.MakeOpenFile(h) + } + if err != nil { + syscall.Close(h) + } + } + } + if err != nil { + for _, f := range fs { + if f != nil { + f.Close() + } + } + return nil, err + } + return fs, nil +} + +// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
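The registerCallback/unregisterCallback pair above is an instance of a reusable pattern: a package-level map keyed by a monotonically increasing token, guarded by an RWMutex, with the token (not a Go pointer) handed to the native layer as the callback context. Distilled into a self-contained sketch:

    package callbacks // illustrative only, not part of the patch

    import "sync"

    var (
        mu       sync.RWMutex
        next     uintptr
        handlers = map[uintptr]func(){}
    )

    // register stores f and returns the token to pass to the native side.
    func register(f func()) uintptr {
        mu.Lock()
        defer mu.Unlock()
        id := next
        next++
        handlers[id] = f
        return id
    }

    // unregister drops the token once the native side is quiesced.
    func unregister(id uintptr) {
        mu.Lock()
        defer mu.Unlock()
        delete(handlers, id)
    }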
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { + if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { + return errors.Wrap(err, "failed to create VHD") + } + + vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) + if err != nil { + return errors.Wrap(err, "failed to open VHD") + } + defer func() { + err2 := windows.CloseHandle(windows.Handle(vhd)) + if err == nil { + err = errors.Wrap(err2, "failed to close VHD") + } + }() + + if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { + return errors.Wrap(err, "failed to format VHD") + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go new file mode 100644 index 00000000000..db4e14fdfb2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -0,0 +1,68 @@ +package hcs + +import ( + "context" + "time" + + "github.com/Microsoft/hcsshim/internal/log" +) + +func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { + events := processHcsResult(ctx, resultJSON) + if IsPending(err) { + return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) + } + + return events, err +} + +func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { + callbackMapLock.RLock() + if _, ok := callbackMap[callbackNumber]; !ok { + callbackMapLock.RUnlock() + log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") + return ErrHandleClose + } + channels := callbackMap[callbackNumber].channels + callbackMapLock.RUnlock() + + expectedChannel := channels[expectedNotification] + if expectedChannel == nil { + log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") + return ErrInvalidNotificationType + } + + var c <-chan time.Time + if timeout != nil { + timer := time.NewTimer(*timeout) + c = timer.C + defer timer.Stop() + } + + select { + case err, ok := <-expectedChannel: + if !ok { + return ErrHandleClose + } + return err + case err, ok := <-channels[hcsNotificationSystemExited]: + if !ok { + return ErrHandleClose + } + // If the expected notification is hcsNotificationSystemExited which of the two selects + // chosen is random. 
Return the raw error if hcsNotificationSystemExited is expected + if channels[hcsNotificationSystemExited] == expectedChannel { + return err + } + return ErrUnexpectedContainerExit + case _, ok := <-channels[hcsNotificationServiceDisconnect]: + if !ok { + return ErrHandleClose + } + // hcsNotificationServiceDisconnect should never be an expected notification + // it does not need the same handling as hcsNotificationSystemExited + return ErrUnexpectedProcessAbort + case <-c: + return ErrTimeout + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go new file mode 100644 index 00000000000..921c2c8556c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -0,0 +1,47 @@ +package hcserror + +import ( + "fmt" + "syscall" +) + +const ERROR_GEN_FAILURE = syscall.Errno(31) + +type HcsError struct { + title string + rest string + Err error +} + +func (e *HcsError) Error() string { + s := e.title + if len(s) > 0 && s[len(s)-1] != ' ' { + s += " " + } + s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) + if e.rest != "" { + if e.rest[0] != ' ' { + s += " " + } + s += e.rest + } + return s +} + +func New(err error, title, rest string) error { + // Pass through DLL errors directly since they do not originate from HCS. + if _, ok := err.(*syscall.DLLError); ok { + return err + } + return &HcsError{title, rest, err} +} + +func Win32FromError(err error) uint32 { + if herr, ok := err.(*HcsError); ok { + return Win32FromError(herr.Err) + } + if code, ok := err.(syscall.Errno); ok { + return uint32(code) + } + return uint32(ERROR_GEN_FAILURE) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go new file mode 100644 index 00000000000..b2e475f53c6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go @@ -0,0 +1,23 @@ +package hns + +import "fmt" + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go + +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? 
+ +type EndpointNotFoundError struct { + EndpointName string +} + +func (e EndpointNotFoundError) Error() string { + return fmt.Sprintf("Endpoint %s not found", e.EndpointName) +} + +type NetworkNotFoundError struct { + NetworkName string +} + +func (e NetworkNotFoundError) Error() string { + return fmt.Sprintf("Network %s not found", e.NetworkName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go new file mode 100644 index 00000000000..7cf954c7b26 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -0,0 +1,338 @@ +package hns + +import ( + "encoding/json" + "net" + "strings" + + "github.com/sirupsen/logrus" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + VirtualNetwork string `json:",omitempty"` + VirtualNetworkName string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress net.IP `json:",omitempty"` + IPv6Address net.IP `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + DNSDomain string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + GatewayAddressV6 string `json:",omitempty"` + EnableInternalDNS bool `json:",omitempty"` + DisableICC bool `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + IPv6PrefixLength uint8 `json:",omitempty"` + IsRemoteEndpoint bool `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` + Namespace *Namespace `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` + SharedContainers []string `json:",omitempty"` +} + +//SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" +) + +// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type EndpointAttachDetachRequest struct { + ContainerID string `json:"ContainerId,omitempty"` + SystemType SystemType `json:"SystemType"` + CompartmentID uint16 `json:"CompartmentId,omitempty"` + VirtualNICName string `json:"VirtualNicName,omitempty"` +} + +// EndpointResquestResponse is object to get the endpoint request response +type EndpointResquestResponse struct { + Success bool + Error string +} + +// EndpointStats is the object that has stats for a given endpoint +type EndpointStats struct { + BytesReceived uint64 `json:"BytesReceived"` + BytesSent uint64 `json:"BytesSent"` + DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` + DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` + EndpointID string `json:"EndpointId"` + InstanceID string `json:"InstanceId"` + PacketsReceived uint64 `json:"PacketsReceived"` + PacketsSent uint64 `json:"PacketsSent"` +} + +// HNSEndpointRequest makes a HNS call to modify/query a network endpoint +func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { + endpoint := &HNSEndpoint{} + err := hnsCall(method, "/endpoints/"+path, request, &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// HNSListEndpointRequest makes a HNS call to query the list of available endpoints +func HNSListEndpointRequest() 
([]HNSEndpoint, error) {
+	var endpoint []HNSEndpoint
+	err := hnsCall("GET", "/endpoints/", "", &endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return endpoint, nil
+}
+
+// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID
+func hnsEndpointStatsRequest(id string) (*EndpointStats, error) {
+	var stats EndpointStats
+	err := hnsCall("GET", "/endpointstats/"+id, "", &stats)
+	if err != nil {
+		return nil, err
+	}
+
+	return &stats, nil
+}
+
+// GetHNSEndpointByID gets the Endpoint by ID
+func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
+	return HNSEndpointRequest("GET", endpointID, "")
+}
+
+// GetHNSEndpointStats gets the stats for an Endpoint by ID
+func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) {
+	return hnsEndpointStatsRequest(endpointID)
+}
+
+// GetHNSEndpointByName gets the endpoint filtered by Name
+func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
+	hnsResponse, err := HNSListEndpointRequest()
+	if err != nil {
+		return nil, err
+	}
+	for _, hnsEndpoint := range hnsResponse {
+		if hnsEndpoint.Name == endpointName {
+			return &hnsEndpoint, nil
+		}
+	}
+	return nil, EndpointNotFoundError{EndpointName: endpointName}
+}
+
+type endpointAttachInfo struct {
+	SharedContainers json.RawMessage `json:",omitempty"`
+}
+
+func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) {
+	attachInfo := endpointAttachInfo{}
+	err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo)
+
+	// Return false allows us to just return the err
+	if err != nil {
+		return false, err
+	}
+
+	if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) {
+		return true, nil
+	}
+
+	return false, nil
+
+}
+
+// Create Endpoint by sending EndpointRequest to HNS.
TODO: Create a separate HNS interface to place all these methods +func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { + operation := "Create" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + return HNSEndpointRequest("POST", "", string(jsonString)) +} + +// Delete Endpoint by sending EndpointRequest to HNS +func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { + operation := "Delete" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + return HNSEndpointRequest("DELETE", endpoint.Id, "") +} + +// Update Endpoint +func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { + operation := "Update" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) + + return endpoint, err +} + +// ApplyACLPolicy applies a set of ACL Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { + operation := "ApplyACLPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { + operation := "ApplyProxyPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ContainerAttach attaches an endpoint to container +func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { + operation := "ContainerAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + CompartmentID: compartmentID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// ContainerDetach detaches an endpoint from container +func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { + operation := "ContainerDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// HostAttach attaches a nic on the host +func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { + operation := "HostAttach" + title 
:= "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + CompartmentID: compartmentID, + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) + +} + +// HostDetach detaches a nic on the host +func (endpoint *HNSEndpoint) HostDetach() error { + operation := "HostDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// VirtualMachineNICAttach attaches a endpoint to a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { + operation := "VirtualMachineNicAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + VirtualNICName: virtualMachineNICName, + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// VirtualMachineNICDetach detaches a endpoint from a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { + operation := "VirtualMachineNicDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go new file mode 100644 index 00000000000..2df4a57f56c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -0,0 +1,49 @@ +package hns + +import ( + "encoding/json" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { + var responseBuffer *uint16 + logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return nil, hcserror.New(err, "hnsCall ", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return nil, err + } + return hnsresponse, nil +} + +func hnsCall(method, path, request string, returnResponse interface{}) error { + hnsresponse, err := hnsCallRawResponse(method, path, request) + if err != nil { + return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + } + if !hnsresponse.Success { + return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) + } + + if 
len(hnsresponse.Output) == 0 {
+		return nil
+	}
+
+	logrus.Debugf("Network Response : %s", hnsresponse.Output)
+	err = json.Unmarshal(hnsresponse.Output, returnResponse)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
new file mode 100644
index 00000000000..a8d8cc56aea
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go
@@ -0,0 +1,28 @@
+package hns
+
+type HNSGlobals struct {
+	Version HNSVersion `json:"Version"`
+}
+
+type HNSVersion struct {
+	Major int `json:"Major"`
+	Minor int `json:"Minor"`
+}
+
+var (
+	HNSVersion1803 = HNSVersion{Major: 7, Minor: 2}
+)
+
+func GetHNSGlobals() (*HNSGlobals, error) {
+	var version HNSVersion
+	err := hnsCall("GET", "/globals/version", "", &version)
+	if err != nil {
+		return nil, err
+	}
+
+	globals := &HNSGlobals{
+		Version: version,
+	}
+
+	return globals, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
new file mode 100644
index 00000000000..f12d3ab0411
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
@@ -0,0 +1,141 @@
+package hns
+
+import (
+	"encoding/json"
+	"errors"
+	"github.com/sirupsen/logrus"
+	"net"
+)
+
+// Subnet is associated with a network and represents a list
+// of subnets available to the network
+type Subnet struct {
+	AddressPrefix  string            `json:",omitempty"`
+	GatewayAddress string            `json:",omitempty"`
+	Policies       []json.RawMessage `json:",omitempty"`
+}
+
+// MacPool is associated with a network and represents a list
+// of macaddresses available to the network
+type MacPool struct {
+	StartMacAddress string `json:",omitempty"`
+	EndMacAddress   string `json:",omitempty"`
+}
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork struct {
+	Id                   string            `json:"ID,omitempty"`
+	Name                 string            `json:",omitempty"`
+	Type                 string            `json:",omitempty"`
+	NetworkAdapterName   string            `json:",omitempty"`
+	SourceMac            string            `json:",omitempty"`
+	Policies             []json.RawMessage `json:",omitempty"`
+	MacPools             []MacPool         `json:",omitempty"`
+	Subnets              []Subnet          `json:",omitempty"`
+	DNSSuffix            string            `json:",omitempty"`
+	DNSServerList        string            `json:",omitempty"`
+	DNSServerCompartment uint32            `json:",omitempty"`
+	ManagementIP         string            `json:",omitempty"`
+	AutomaticDNS         bool              `json:",omitempty"`
+}
+
+type hnsResponse struct {
+	Success bool
+	Error   string
+	Output  json.RawMessage
+}
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+	var network HNSNetwork
+	err := hnsCall(method, "/networks/"+path, request, &network)
+	if err != nil {
+		return nil, err
+	}
+
+	return &network, nil
+}
+
+// HNSListNetworkRequest makes a HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+	var network []HNSNetwork
+	err := hnsCall(method, "/networks/"+path, request, &network)
+	if err != nil {
+		return nil, err
+	}
+
+	return network, nil
+}
+
+// GetHNSNetworkByID gets a network by its ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+	return HNSNetworkRequest("GET", networkID, "")
+}
+
+// GetHNSNetworkByName gets a network filtered by Name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+	hsnnetworks, err := HNSListNetworkRequest("GET", "", "")
+	if err != nil {
+
return nil, err + } + for _, hnsnetwork := range hsnnetworks { + if hnsnetwork.Name == networkName { + return &hnsnetwork, nil + } + } + return nil, NetworkNotFoundError{NetworkName: networkName} +} + +// Create Network by sending NetworkRequest to HNS. +func (network *HNSNetwork) Create() (*HNSNetwork, error) { + operation := "Create" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + for _, subnet := range network.Subnets { + if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + } + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + return HNSNetworkRequest("POST", "", string(jsonString)) +} + +// Delete Network by sending NetworkRequest to HNS +func (network *HNSNetwork) Delete() (*HNSNetwork, error) { + operation := "Delete" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + return HNSNetworkRequest("DELETE", network.Id, "") +} + +// Creates an endpoint on the Network. +func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { + return &HNSEndpoint{ + VirtualNetwork: network.Id, + IPAddress: ipAddress, + MacAddress: string(macAddress), + } +} + +func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) + + endpoint.VirtualNetwork = network.Id + return endpoint.Create() +} + +func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateRemoteEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + endpoint.IsRemoteEndpoint = true + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go new file mode 100644 index 00000000000..591a2631e45 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -0,0 +1,109 @@ +package hns + +// Type of Request Support in ModifySystem +type PolicyType string + +// RequestType const +const ( + Nat PolicyType = "NAT" + ACL PolicyType = "ACL" + PA PolicyType = "PA" + VLAN PolicyType = "VLAN" + VSID PolicyType = "VSID" + VNet PolicyType = "VNET" + L2Driver PolicyType = "L2Driver" + Isolation PolicyType = "Isolation" + QOS PolicyType = "QOS" + OutboundNat PolicyType = "OutBoundNAT" + ExternalLoadBalancer PolicyType = "ELB" + Route PolicyType = "ROUTE" + Proxy PolicyType = "PROXY" +) + +type NatPolicy struct { + Type PolicyType `json:"Type"` + Protocol string `json:",omitempty"` + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` +} + +type QosPolicy struct { + Type PolicyType `json:"Type"` + MaximumOutgoingBandwidthInBytes uint64 +} + +type IsolationPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint + VSID uint + InDefaultIsolation bool +} + +type VlanPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint +} + +type VsidPolicy struct { + Type PolicyType `json:"Type"` + VSID uint +} + +type PaPolicy struct { + Type PolicyType `json:"Type"` + PA string `json:"PA"` +} + +type OutboundNatPolicy struct { + Policy + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` 
+ Destinations []string `json:",omitempty"` +} + +type ProxyPolicy struct { + Type PolicyType `json:"Type"` + IP string `json:",omitempty"` + Port string `json:",omitempty"` + ExceptionList []string `json:",omitempty"` + Destination string `json:",omitempty"` + OutboundNat bool `json:",omitempty"` +} + +type ActionType string +type DirectionType string +type RuleType string + +const ( + Allow ActionType = "Allow" + Block ActionType = "Block" + + In DirectionType = "In" + Out DirectionType = "Out" + + Host RuleType = "Host" + Switch RuleType = "Switch" +) + +type ACLPolicy struct { + Type PolicyType `json:"Type"` + Id string `json:"Id,omitempty"` + Protocol uint16 `json:",omitempty"` + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 `json:",omitempty"` + Action ActionType + Direction DirectionType + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` +} + +type Policy struct { + Type PolicyType `json:"Type"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go new file mode 100644 index 00000000000..31322a68167 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go @@ -0,0 +1,201 @@ +package hns + +import ( + "encoding/json" + + "github.com/sirupsen/logrus" +) + +// RoutePolicy is a structure defining schema for Route based Policy +type RoutePolicy struct { + Policy + DestinationPrefix string `json:"DestinationPrefix,omitempty"` + NextHop string `json:"NextHop,omitempty"` + EncapEnabled bool `json:"NeedEncap,omitempty"` +} + +// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy +type ELBPolicy struct { + LBPolicy + SourceVIP string `json:"SourceVIP,omitempty"` + VIPs []string `json:"VIPs,omitempty"` + ILB bool `json:"ILB,omitempty"` + DSR bool `json:"IsDSR,omitempty"` +} + +// LBPolicy is a structure defining schema for LoadBalancing based Policy +type LBPolicy struct { + Policy + Protocol uint16 `json:"Protocol,omitempty"` + InternalPort uint16 + ExternalPort uint16 +} + +// PolicyList is a structure defining schema for Policy list request +type PolicyList struct { + ID string `json:"ID,omitempty"` + EndpointReferences []string `json:"References,omitempty"` + Policies []json.RawMessage `json:"Policies,omitempty"` +} + +// HNSPolicyListRequest makes a call into HNS to update/query a single network +func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { + var policy PolicyList + err := hnsCall(method, "/policylists/"+path, request, &policy) + if err != nil { + return nil, err + } + + return &policy, nil +} + +// HNSListPolicyListRequest gets all the policy list +func HNSListPolicyListRequest() ([]PolicyList, error) { + var plist []PolicyList + err := hnsCall("GET", "/policylists/", "", &plist) + if err != nil { + return nil, err + } + + return plist, nil +} + +// PolicyListRequest makes a HNS call to modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + policylist := &PolicyList{} + err := hnsCall(method, "/policylists/"+path, request, &policylist) + if err != nil { + return nil, err + } + 
+	return policylist, nil
+}
+
+// GetPolicyListByID gets the policy list by ID
+func GetPolicyListByID(policyListID string) (*PolicyList, error) {
+	return PolicyListRequest("GET", policyListID, "")
+}
+
+// Create PolicyList by sending PolicyListRequest to HNS.
+func (policylist *PolicyList) Create() (*PolicyList, error) {
+	operation := "Create"
+	title := "hcsshim::PolicyList::" + operation
+	logrus.Debugf(title+" id=%s", policylist.ID)
+	jsonString, err := json.Marshal(policylist)
+	if err != nil {
+		return nil, err
+	}
+	return PolicyListRequest("POST", "", string(jsonString))
+}
+
+// Delete deletes PolicyList
+func (policylist *PolicyList) Delete() (*PolicyList, error) {
+	operation := "Delete"
+	title := "hcsshim::PolicyList::" + operation
+	logrus.Debugf(title+" id=%s", policylist.ID)
+
+	return PolicyListRequest("DELETE", policylist.ID, "")
+}
+
+// AddEndpoint adds an endpoint to a Policy List
+func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
+	operation := "AddEndpoint"
+	title := "hcsshim::PolicyList::" + operation
+	logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
+
+	_, err := policylist.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Add Endpoint to the Existing List
+	policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
+
+	return policylist.Create()
+}
+
+// RemoveEndpoint removes an endpoint from the Policy List
+func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
+	operation := "RemoveEndpoint"
+	title := "hcsshim::PolicyList::" + operation
+	logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
+
+	_, err := policylist.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	elementToRemove := "/endpoints/" + endpoint.Id
+
+	var references []string
+
+	for _, endpointReference := range policylist.EndpointReferences {
+		if endpointReference == elementToRemove {
+			continue
+		}
+		references = append(references, endpointReference)
+	}
+	policylist.EndpointReferences = references
+	return policylist.Create()
+}
+
+// AddLoadBalancer adds a load balancer policy list for the specified endpoints
+func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
+	operation := "AddLoadBalancer"
+	title := "hcsshim::PolicyList::" + operation
+	logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
+
+	policylist := &PolicyList{}
+
+	elbPolicy := &ELBPolicy{
+		SourceVIP: sourceVIP,
+		ILB:       isILB,
+	}
+
+	if len(vip) > 0 {
+		elbPolicy.VIPs = []string{vip}
+	}
+	elbPolicy.Type = ExternalLoadBalancer
+	elbPolicy.Protocol = protocol
+	elbPolicy.InternalPort = internalPort
+	elbPolicy.ExternalPort = externalPort
+
+	for _, endpoint := range endpoints {
+		policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
+	}
+
+	jsonString, err := json.Marshal(elbPolicy)
+	if err != nil {
+		return nil, err
+	}
+	policylist.Policies = append(policylist.Policies, jsonString)
+	return policylist.Create()
+}
+
+// AddRoute adds a route policy list for the specified endpoints
+func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
+	operation := "AddRoute"
+	title := "hcsshim::PolicyList::" + operation
+	logrus.Debugf(title+"
destinationPrefix:%s", destinationPrefix) + + policylist := &PolicyList{} + + rPolicy := &RoutePolicy{ + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + EncapEnabled: encapEnabled, + } + rPolicy.Type = Route + + for _, endpoint := range endpoints { + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(rPolicy) + if err != nil { + return nil, err + } + + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go new file mode 100644 index 00000000000..d5efba7f284 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go @@ -0,0 +1,49 @@ +package hns + +import ( + "github.com/sirupsen/logrus" +) + +type HNSSupportedFeatures struct { + Acl HNSAclFeatures `json:"ACL"` +} + +type HNSAclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +func GetHNSSupportedFeatures() HNSSupportedFeatures { + var hnsFeatures HNSSupportedFeatures + + globals, err := GetHNSGlobals() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.Debugf("Unable to obtain HNS globals: %s", err) + return hnsFeatures + } + + hnsFeatures.Acl = HNSAclFeatures{ + AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803), + } + + return hnsFeatures +} + +func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { + if currentVersion.Major < minVersionSupported.Major { + return false + } + if currentVersion.Major > minVersionSupported.Major { + return true + } + if currentVersion.Minor < minVersionSupported.Minor { + return false + } + return true +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go new file mode 100644 index 00000000000..d3b04eefe0c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -0,0 +1,111 @@ +package hns + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strings" +) + +type namespaceRequest struct { + IsDefault bool `json:",omitempty"` +} + +type namespaceEndpointRequest struct { + ID string `json:"Id"` +} + +type NamespaceResource struct { + Type string + Data json.RawMessage +} + +type namespaceResourceRequest struct { + Type string + Data interface{} +} + +type Namespace struct { + ID string + IsDefault bool `json:",omitempty"` + ResourceList []NamespaceResource `json:",omitempty"` + CompartmentId uint32 `json:",omitempty"` +} + +func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { + var err error + hnspath := "/namespaces/" + if id != nil { + hnspath = path.Join(hnspath, *id) + } + if subpath != "" { + hnspath = path.Join(hnspath, subpath) + } + var reqJSON []byte + if request != nil { + if reqJSON, err = json.Marshal(request); err != nil { + return nil, err + } + } + var ns Namespace + err = hnsCall(method, hnspath, string(reqJSON), &ns) + if err 
!= nil { + if strings.Contains(err.Error(), "Element not found.") { + return nil, os.ErrNotExist + } + return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + } + return &ns, err +} + +func CreateNamespace() (string, error) { + req := namespaceRequest{} + ns, err := issueNamespaceRequest(nil, "POST", "", &req) + if err != nil { + return "", err + } + return ns.ID, nil +} + +func RemoveNamespace(id string) error { + _, err := issueNamespaceRequest(&id, "DELETE", "", nil) + return err +} + +func GetNamespaceEndpoints(id string) ([]string, error) { + ns, err := issueNamespaceRequest(&id, "GET", "", nil) + if err != nil { + return nil, err + } + var endpoints []string + for _, rsrc := range ns.ResourceList { + if rsrc.Type == "Endpoint" { + var endpoint namespaceEndpointRequest + err = json.Unmarshal(rsrc.Data, &endpoint) + if err != nil { + return nil, fmt.Errorf("unmarshal endpoint: %s", err) + } + endpoints = append(endpoints, endpoint.ID) + } + } + return endpoints, nil +} + +func AddNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) + return err +} + +func RemoveNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go new file mode 100644 index 00000000000..204633a4887 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -0,0 +1,76 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hns + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
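+ // Not one of the special-cased values above; return the raw Errno.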
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHNSCall = modvmcompute.NewProc("HNSCall") +) + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go new file mode 100644 index 00000000000..922f7c679e0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -0,0 +1,23 @@ +package interop + +import ( + "syscall" + "unsafe" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go + +//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree + +func ConvertAndFreeCoTaskMemString(buffer *uint16) string { + str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) + coTaskMemFree(unsafe.Pointer(buffer)) + return str +} + +func Win32FromHresult(hr uintptr) syscall.Errno { + if hr&0x1fff0000 == 0x00070000 { + return syscall.Errno(hr & 0xffff) + } + return syscall.Errno(hr) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go new file mode 100644 index 00000000000..12b0c71c5ae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -0,0 +1,48 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package interop + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") + + procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") +) + +func coTaskMemFree(buffer unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go new file mode 100644 index 00000000000..ba6b1a4a53a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` +// contains an OpenCensus `trace.Span`. +func G(ctx context.Context) *logrus.Entry { + span := trace.FromContext(ctx) + if span != nil { + sctx := span.SpanContext() + return logrus.WithFields(logrus.Fields{ + "traceID": sctx.TraceID.String(), + "spanID": sctx.SpanID.String(), + // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? + }) + } + return logrus.NewEntry(logrus.StandardLogger()) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go new file mode 100644 index 00000000000..cf2c166d9b8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -0,0 +1,32 @@ +package logfields + +const ( + // Identifiers + + ContainerID = "cid" + UVMID = "uvm-id" + ProcessID = "pid" + + // Common Misc + + // Timeout represents an operation timeout. + Timeout = "timeout" + JSON = "json" + + // Keys/values + + Field = "field" + OCIAnnotation = "oci-annotation" + Value = "value" + + // Golang type's + + ExpectedType = "expected-type" + Bool = "bool" + Uint32 = "uint32" + Uint64 = "uint64" + + // runhcs + + VMShimOperation = "vmshim-op" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go new file mode 100644 index 00000000000..e5b8b85e09a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go @@ -0,0 +1,24 @@ +package longpath + +import ( + "path/filepath" + "strings" +) + +// LongAbs makes a path absolute and returns it in NT long path form. +func LongAbs(path string) (string, error) { + if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { + return path, nil + } + if !filepath.IsAbs(path) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + path = absPath + } + if strings.HasPrefix(path, `\\`) { + return `\\?\UNC\` + path[2:], nil + } + return `\\?\` + path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go new file mode 100644 index 00000000000..7e95efb30d7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go @@ -0,0 +1,52 @@ +package mergemaps + +import "encoding/json" + +// Merge recursively merges map `fromMap` into map `ToMap`. Any pre-existing values +// in ToMap are overwritten. Values in fromMap are added to ToMap. 
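+// (Editorial note: as implemented, the merge is written into fromMap, which
+// is also the return value; on conflicting keys the fromMap side generally
+// takes precedence, recursing into nested maps.)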
+// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang +func Merge(fromMap, ToMap interface{}) interface{} { + switch fromMap := fromMap.(type) { + case map[string]interface{}: + ToMap, ok := ToMap.(map[string]interface{}) + if !ok { + return fromMap + } + for keyToMap, valueToMap := range ToMap { + if valueFromMap, ok := fromMap[keyToMap]; ok { + fromMap[keyToMap] = Merge(valueFromMap, valueToMap) + } else { + fromMap[keyToMap] = valueToMap + } + } + case nil: + // merge(nil, map[string]interface{...}) -> map[string]interface{...} + ToMap, ok := ToMap.(map[string]interface{}) + if ok { + return ToMap + } + } + return fromMap +} + +// MergeJSON merges the contents of a JSON string into an object representation, +// returning a new object suitable for translating to JSON. +func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) { + if len(additionalJSON) == 0 { + return object, nil + } + objectJSON, err := json.Marshal(object) + if err != nil { + return nil, err + } + var objectMap, newMap map[string]interface{} + err = json.Unmarshal(objectJSON, &objectMap) + if err != nil { + return nil, err + } + err = json.Unmarshal(additionalJSON, &newMap) + if err != nil { + return nil, err + } + return Merge(newMap, objectMap), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go new file mode 100644 index 00000000000..f428bdaf720 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -0,0 +1,43 @@ +package oc + +import ( + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +var _ = (trace.Exporter)(&LogrusExporter{}) + +// LogrusExporter is an OpenCensus `trace.Exporter` that exports +// `trace.SpanData` to logrus output. +type LogrusExporter struct { +} + +// ExportSpan exports `s` based on the the following rules: +// +// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, +// `s.ParentSpanID` for correlation +// +// 2. Any calls to .Annotate will not be supported. +// +// 3. The span itself will be written at `logrus.InfoLevel` unless +// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` +// providing `s.Status.Message` as the error value. +func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { + // Combine all span annotations with traceID, spanID, parentSpanID + baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) + baseEntry.Data["traceID"] = s.TraceID.String() + baseEntry.Data["spanID"] = s.SpanID.String() + baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() + baseEntry.Data["startTime"] = s.StartTime + baseEntry.Data["endTime"] = s.EndTime + baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() + baseEntry.Data["name"] = s.Name + baseEntry.Time = s.StartTime + + level := logrus.InfoLevel + if s.Status.Code != 0 { + level = logrus.ErrorLevel + baseEntry.Data[logrus.ErrorKey] = s.Status.Message + } + baseEntry.Log(level, "Span") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go new file mode 100644 index 00000000000..fee4765cbc4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -0,0 +1,17 @@ +package oc + +import ( + "go.opencensus.io/trace" +) + +// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If +// `err` is `nil` assumes `trace.StatusCodeOk`. 
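+//
+// A typical call site pairs it with defer, as the vmcompute wrappers in this
+// patch do:
+//
+//	ctx, span := trace.StartSpan(ctx, "operation")
+//	defer span.End()
+//	defer func() { oc.SetSpanStatus(span, err) }()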
+func SetSpanStatus(span *trace.Span, err error) { + status := trace.Status{} + if err != nil { + // TODO: JTERRY75 - Handle errors in a non-generic way + status.Code = trace.StatusCodeUnknown + status.Message = err.Error() + } + span.SetStatus(status) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go new file mode 100644 index 00000000000..66b8d7e0353 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -0,0 +1,375 @@ +package safefile + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + "syscall" + "unicode/utf16" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/winapi" + + winio "github.com/Microsoft/go-winio" +) + +func OpenRoot(path string) (*os.File, error) { + longpath, err := longpath.LongAbs(path) + if err != nil { + return nil, err + } + return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) +} + +func cleanGoStringRelativePath(path string) (string, error) { + path = filepath.Clean(path) + if strings.Contains(path, ":") { + // Since alternate data streams must follow the file they + // are attached to, finding one here (out of order) is invalid. + return "", errors.New("path contains invalid character `:`") + } + fspath := filepath.FromSlash(path) + if len(fspath) > 0 && fspath[0] == '\\' { + return "", errors.New("expected relative path") + } + return fspath, nil +} + +func ntRelativePath(path string) ([]uint16, error) { + fspath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err + } + + path16 := utf16.Encode(([]rune)(fspath)) + if len(path16) > 32767 { + return nil, syscall.ENAMETOOLONG + } + + return path16, nil +} + +// openRelativeInternal opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + var ( + h uintptr + iosb winapi.IOStatusBlock + oa winapi.ObjectAttributes + ) + + cleanRelativePath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err + } + + if root == nil || root.Fd() == 0 { + return nil, errors.New("missing root directory") + } + + pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) + if err != nil { + return nil, err + } + + oa.Length = unsafe.Sizeof(oa) + oa.ObjectName = pathUnicode + oa.RootDirectory = uintptr(root.Fd()) + oa.Attributes = winapi.OBJ_DONT_REPARSE + status := winapi.NtCreateFile( + &h, + accessMask|syscall.SYNCHRONIZE, + &oa, + &iosb, + nil, + 0, + shareFlags, + createDisposition, + winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, + nil, + 0, + ) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + + fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) + if err != nil { + syscall.Close(syscall.Handle(h)) + return nil, err + } + + return os.NewFile(h, fullPath), nil +} + +// OpenRelative opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. 
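+// Refusing reparse points (OBJ_DONT_REPARSE in openRelativeInternal) prevents
+// a crafted symlink or junction under root from redirecting the open to a
+// path outside of root.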
+func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) + if err != nil { + err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} + } + return f, err +} + +// LinkRelative creates a hard link from oldname to newname (relative to oldroot +// and newroot), failing if any of the intermediate path components are reparse +// points. +func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { + // Open the old file. + oldf, err := openRelativeInternal( + oldname, + oldroot, + syscall.FILE_WRITE_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + 0, + ) + if err != nil { + return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer oldf.Close() + + // Open the parent of the new file. + var parent *os.File + parentPath := filepath.Dir(newname) + if parentPath != "." { + parent, err = openRelativeInternal( + parentPath, + newroot, + syscall.GENERIC_READ, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_DIRECTORY_FILE) + if err != nil { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer parent.Close() + + fi, err := winio.GetFileBasicInfo(parent) + if err != nil { + return err + } + if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} + } + + } else { + parent = newroot + } + + // Issue an NT call to create the link. This will be safe because NT will + // not open any more directories to create the link, so it cannot walk any + // more reparse points. + newbase := filepath.Base(newname) + newbase16, err := ntRelativePath(newbase) + if err != nil { + return err + } + + size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := winapi.LocalAlloc(0, size) + defer winapi.LocalFree(linkinfoBuffer) + + linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + linkinfo.RootDirectory = parent.Fd() + linkinfo.FileNameLength = uint32(len(newbase16) * 2) + copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) + + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( + oldf.Fd(), + &iosb, + linkinfoBuffer, + uint32(size), + winapi.FileLinkInformationClass, + ) + if status != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} + } + + return nil +} + +// deleteOnClose marks a file to be deleted when the handle is closed. +func deleteOnClose(f *os.File) error { + disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( + f.Fd(), + &iosb, + uintptr(unsafe.Pointer(&disposition)), + uint32(unsafe.Sizeof(disposition)), + winapi.FileDispositionInformationExClass, + ) + if status != 0 { + return winapi.RtlNtStatusToDosError(status) + } + return nil +} + +// clearReadOnly clears the readonly attribute on a file. 
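+// It exists so RemoveRelative can retry a delete that failed with
+// ERROR_ACCESS_DENIED because the readonly bit was set.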
+func clearReadOnly(f *os.File) error { + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + return err + } + if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { + return nil + } + sbi := winio.FileBasicInfo{ + FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, + } + if sbi.FileAttributes == 0 { + sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL + } + return winio.SetFileBasicInfo(f, &sbi) +} + +// RemoveRelative removes a file or directory relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err == nil { + defer f.Close() + err = deleteOnClose(f) + if err == syscall.ERROR_ACCESS_DENIED { + // Maybe the file is marked readonly. Clear the bit and retry. + _ = clearReadOnly(f) + err = deleteOnClose(f) + } + } + if err != nil { + return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} + } + return nil +} + +// RemoveAllRelative removes a directory tree relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveAllRelative(path string, root *os.File) error { + fi, err := LstatRelative(path, root) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // If this is a reparse point, it can't have children. Simple remove will do. + err := RemoveRelative(path, root) + if err == nil || os.IsNotExist(err) { + return nil + } + return err + } + + // It is necessary to use os.Open as Readdirnames does not work with + // OpenRelative. This is safe because the above lstatrelative fails + // if the target is outside the root, and we know this is not a + // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. + fd, err := os.Open(filepath.Join(root.Name(), path)) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + return err + } + + // Remove contents & return first error. + for { + names, err1 := fd.Readdirnames(100) + for _, name := range names { + err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) + if err == nil { + err = err1 + } + } + if err1 == io.EOF { + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = err1 + } + if len(names) == 0 { + break + } + } + fd.Close() + + // Remove directory. + err1 := RemoveRelative(path, root) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + if err == nil { + err = err1 + } + return err +} + +// MkdirRelative creates a directory relative to a root, failing if any +// intermediate path components are reparse points. 
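+//
+// Illustrative use (paths hypothetical):
+//
+//	root, err := safefile.OpenRoot(`C:\layer`)
+//	if err != nil { ... }
+//	defer root.Close()
+//	err = safefile.MkdirRelative("scratch", root)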
+func MkdirRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_CREATE, + winapi.FILE_DIRECTORY_FILE) + if err == nil { + f.Close() + } else { + err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} + } + return err +} + +// LstatRelative performs a stat operation on a file relative to a root, failing +// if any intermediate path components are reparse points. +func LstatRelative(path string, root *os.File) (os.FileInfo, error) { + f, err := openRelativeInternal( + path, + root, + winapi.FILE_READ_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} + } + defer f.Close() + return f.Stat() +} + +// EnsureNotReparsePointRelative validates that a given file (relative to a +// root) and all intermediate path components are not a reparse points. +func EnsureNotReparsePointRelative(path string, root *os.File) error { + // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. + f, err := OpenRelative( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + 0) + if err != nil { + return err + } + f.Close() + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go new file mode 100644 index 00000000000..eaf39fa5132 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -0,0 +1,74 @@ +package timeout + +import ( + "os" + "strconv" + "time" +) + +var ( + // defaultTimeout is the timeout for most operations that is not overridden. + defaultTimeout = 4 * time.Minute + + // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond + // for a disk to come online in LCOW. + defaultTimeoutTestdRetry = 5 * time.Second +) + +// External variables for HCSShim consumers to use. +var ( + // SystemCreate is the timeout for creating a compute system + SystemCreate time.Duration = defaultTimeout + + // SystemStart is the timeout for starting a compute system + SystemStart time.Duration = defaultTimeout + + // SystemPause is the timeout for pausing a compute system + SystemPause time.Duration = defaultTimeout + + // SystemResume is the timeout for resuming a compute system + SystemResume time.Duration = defaultTimeout + + // SystemSave is the timeout for saving a compute system + SystemSave time.Duration = defaultTimeout + + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. + SyscallWatcher time.Duration = defaultTimeout + + // Tar2VHD is the timeout for the tar2vhd operation to complete + Tar2VHD time.Duration = defaultTimeout + + // ExternalCommandToStart is the timeout for external commands to start + ExternalCommandToStart = defaultTimeout + + // ExternalCommandToComplete is the timeout for external commands to complete. + // Generally this means copying data from their stdio pipes. 
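+ // (Editorial note: every timeout in this block can be overridden at
+ // startup through the HCSSHIM_TIMEOUT_* environment variables read by
+ // init below; values are whole seconds.)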
+ ExternalCommandToComplete = defaultTimeout + + // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW + TestDRetryLoop = defaultTimeoutTestdRetry +) + +func init() { + SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) + SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) + SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) + SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) + SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) + Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) + ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) + ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) + TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) +} + +func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { + envTimeout := os.Getenv(env) + if len(envTimeout) > 0 { + e, err := strconv.Atoi(envTimeout) + if err == nil && e > 0 { + return time.Second * time.Duration(e) + } + } + return defaultValue +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go new file mode 100644 index 00000000000..e7f114b67aa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -0,0 +1,610 @@ +package vmcompute + +import ( + gcontext "context" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "go.opencensus.io/trace" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? +//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? +//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? 
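+//
+// (Editorial note: the trailing "?" on these //sys declarations makes
+// mksyscall_windows emit a Find() check, so calling a procedure missing from
+// vmcompute.dll returns an error instead of panicking at call time.)
+//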
+//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? +//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? +//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? +//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? + +//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? +//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? +//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? + +// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously +const errVmcomputeOperationPending = syscall.Errno(0xC0370103) + +// HcsSystem is the handle associated with a created compute system. +type HcsSystem syscall.Handle + +// HcsProcess is the handle associated with a created process in a compute +// system. +type HcsProcess syscall.Handle + +// HcsCallback is the handle associated with the function to call when events +// occur. +type HcsCallback syscall.Handle + +// HcsProcessInformation is the structure used when creating or getting process +// info. +type HcsProcessInformation struct { + // ProcessId is the pid of the created process. + ProcessId uint32 + reserved uint32 //nolint:structcheck + // StdInput is the handle associated with the stdin of the process. + StdInput syscall.Handle + // StdOutput is the handle associated with the stdout of the process. + StdOutput syscall.Handle + // StdError is the handle associated with the stderr of the process. 
+ StdError syscall.Handle +} + +func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { + if timeout > 0 { + var cancel gcontext.CancelFunc + ctx, cancel = gcontext.WithTimeout(ctx, timeout) + defer cancel() + } + + done := make(chan error, 1) + go func() { + done <- f() + }() + select { + case <-ctx.Done(): + if ctx.Err() == gcontext.DeadlineExceeded { + log.G(ctx).WithField(logfields.Timeout, timeout). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + } + return ctx.Err() + case err := <-done: + return err + } +} + +func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("query", query)) + + return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + computeSystemsp *uint16 + resultp *uint16 + ) + err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + if computeSystemsp != nil { + computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes( + trace.StringAttribute("id", id), + trace.StringAttribute("configuration", configuration)) + + return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { + var resultp *uint16 + err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenComputeSystem(id, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseComputeSystem(computeSystem) + }) +} + +func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") + defer 
span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemStart, func() error { + var resultp *uint16 + err := hcsStartComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsShutdownComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemPause, func() error { + var resultp *uint16 + err := hcsPauseComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemResume, func() error { + var resultp *uint16 + err := hcsResumeComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { + ctx, span := 
trace.StartSpan(ctx, "HcsGetComputeSystemProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("configuration", configuration)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyServiceSettings(settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterComputeSystemCallback(callbackHandle) + }) +} + +func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) + + return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := 
hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) + + return process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenProcess(computeSystem, pid, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseProcess(process) + }) +} + +func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateProcess(process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSignalProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSignalProcess(process, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsGetProcessInfo(process, &processInformation, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + processPropertiesp *uint16 + resultp *uint16 + ) + err := hcsGetProcessProperties(process, 
&processPropertiesp, &resultp) + if processPropertiesp != nil { + processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyProcess(process, settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterProcessCallback(callbackHandle) + }) +} + +func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSaveComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go new file mode 100644 index 00000000000..cae55058deb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -0,0 +1,581 @@ +// Code generated 
mksyscall_windows.exe DO NOT EDIT + +package vmcompute + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") + procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") +) + +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) +} + +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + 
} + return +} + +func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) +} + +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + if hr = procHcsCreateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { + if hr = procHcsOpenComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { + if hr = procHcsCloseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsStartComputeSystem(computeSystem, _p0, result) +} + +func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsStartComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsShutdownComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, 
result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsTerminateComputeSystem(computeSystem, _p0, result) +} + +func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsPauseComputeSystem(computeSystem, _p0, result) +} + +func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsPauseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsResumeComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) +} + +func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsModifyComputeSystem(computeSystem, _p0, result) +} + +func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { + if hr = procHcsModifyComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) 
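+ // Editorial note (illustrative sketch, not part of the vendored file):
+ // each exported wrapper in this file converts its Go string arguments with
+ // syscall.UTF16PtrFromString and then delegates to an underscore-prefixed
+ // twin that performs the raw syscall, so a conversion failure returns
+ // before the DLL is ever touched. The shape of the split, with `proc`
+ // standing in for any of the lazy procedures declared above:
+ //
+ //	func doThing(name string) (hr error) {
+ //		var _p0 *uint16
+ //		_p0, hr = syscall.UTF16PtrFromString(name) // fails on embedded NULs
+ //		if hr != nil {
+ //			return // no system call is attempted
+ //		}
+ //		return _doThing(_p0)
+ //	}
+ //
+ //	func _doThing(name *uint16) (hr error) {
+ //		if hr = proc.Find(); hr != nil { // resolve the export lazily
+ //			return
+ //		}
+ //		// syscall.Syscall(proc.Addr(), ...) as in the wrappers here
+ //		return
+ //	}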
+ if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyServiceSettings.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { + if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsSaveComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(processParameters) + if hr != nil { + return + } + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) +} + +func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + if hr = procHcsCreateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { + if hr = procHcsOpenProcess.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseProcess(process HcsProcess) (hr error) { + if hr = procHcsCloseProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSignalProcess(process, _p0, result) +} + +func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { + if hr = procHcsSignalProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { + if hr = procHcsGetProcessInfo.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { + if hr = procHcsGetProcessProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyProcess(process, _p0, result) +} + +func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetServiceProperties(_p0, properties, result) +} + +func _hcsGetServiceProperties(propertyQuery *uint16, properties 
**uint16, result **uint16) (hr error) {
+ if hr = procHcsGetServiceProperties.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) {
+ if hr = procHcsRegisterProcessCallback.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) {
+ if hr = procHcsUnregisterProcessCallback.Find(); hr != nil {
+ return
+ }
+ r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
new file mode 100644
index 00000000000..5debe974d41
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
@@ -0,0 +1,27 @@
+package wclayer
+
+import (
+ "context"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+// ActivateLayer will find the layer with the given id and mount its filesystem.
+// For a read/write layer, the mounted filesystem will appear as a volume on the
+// host, while a read-only layer is generally expected to be a no-op.
+// An activated layer must later be deactivated via DeactivateLayer.
+func ActivateLayer(ctx context.Context, path string) (err error) {
+ title := "hcsshim::ActivateLayer"
+ ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("path", path))
+
+ err = activateLayer(&stdDriverInfo, path)
+ if err != nil {
+ return hcserror.New(err, title, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
new file mode 100644
index 00000000000..3ec708d1ed3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go
@@ -0,0 +1,182 @@
+package wclayer
+
+import (
+ "context"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "github.com/Microsoft/hcsshim/internal/safefile"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "go.opencensus.io/trace"
+)
+
+type baseLayerWriter struct {
+ ctx context.Context
+ s *trace.Span
+
+ root *os.File
+ f *os.File
+ bw *winio.BackupFileWriter
+ err error
+ hasUtilityVM bool
+ dirInfo []dirInfo
+}
+
+type dirInfo struct {
+ path string
+ fileInfo winio.FileBasicInfo
+}
+
+// reapplyDirectoryTimes reapplies directory modification, creation, etc.
times +// after processing of the directory tree has completed. The times are expected +// to be ordered such that parent directories come before child directories. +func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { + for i := range dis { + di := &dis[len(dis)-i-1] // reverse order: process child directories first + f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) + if err != nil { + return err + } + + err = winio.SetFileBasicInfo(f, &di.fileInfo) + f.Close() + if err != nil { + return err + } + + } + return nil +} + +func (w *baseLayerWriter) closeCurrentFile() error { + if w.f != nil { + err := w.bw.Close() + err2 := w.f.Close() + w.f = nil + w.bw = nil + if err != nil { + return err + } + if err2 != nil { + return err2 + } + } + return nil +} + +func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + if filepath.ToSlash(name) == `UtilityVM/Files` { + w.hasUtilityVM = true + } + + var f *os.File + defer func() { + if f != nil { + f.Close() + } + }() + + extraFlags := uint32(0) + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + extraFlags |= winapi.FILE_DIRECTORY_FILE + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) + } + + mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) + f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) + if err != nil { + return hcserror.New(err, "Failed to safefile.OpenRelative", name) + } + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return hcserror.New(err, "Failed to SetFileBasicInfo", name) + } + + w.f = f + w.bw = winio.NewBackupFileWriter(f, true) + f = nil + return nil +} + +func (w *baseLayerWriter) AddLink(name string, target string) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + return safefile.LinkRelative(target, w.root, name, w.root) +} + +func (w *baseLayerWriter) Remove(name string) error { + return errors.New("base layer cannot have tombstones") +} + +func (w *baseLayerWriter) Write(b []byte) (int, error) { + n, err := w.bw.Write(b) + if err != nil { + w.err = err + } + return n, err +} + +func (w *baseLayerWriter) Close() (err error) { + defer w.s.End() + defer func() { oc.SetSpanStatus(w.s, err) }() + defer func() { + w.root.Close() + w.root = nil + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + if w.err == nil { + // Restore the file times of all the directories, since they may have + // been modified by creating child directories. 
+ err = reapplyDirectoryTimes(w.root, w.dirInfo)
+ if err != nil {
+ return err
+ }
+
+ err = ProcessBaseLayer(w.ctx, w.root.Name())
+ if err != nil {
+ return err
+ }
+
+ if w.hasUtilityVM {
+ err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root)
+ if err != nil {
+ return err
+ }
+ err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM"))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return w.err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
new file mode 100644
index 00000000000..480aee8725c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
@@ -0,0 +1,27 @@
+package wclayer
+
+import (
+ "context"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+// CreateLayer creates a new, empty, read-only layer on the filesystem based on
+// the parent layer provided.
+func CreateLayer(ctx context.Context, path, parent string) (err error) {
+ title := "hcsshim::CreateLayer"
+ ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parent", parent))
+
+ err = createLayer(&stdDriverInfo, path, parent)
+ if err != nil {
+ return hcserror.New(err, title, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
new file mode 100644
index 00000000000..131aa94f14b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
@@ -0,0 +1,34 @@
+package wclayer
+
+import (
+ "context"
+ "strings"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+// CreateScratchLayer creates and populates a new read-write layer for use by a container.
+// This requires the full list of paths to all parent layers up to the base.
+func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) {
+ title := "hcsshim::CreateScratchLayer"
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
+
+ // Generate layer descriptors
+ layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
+ if err != nil {
+ return hcserror.New(err, title, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
new file mode 100644
index 00000000000..d5bf2f5bdc5
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go
@@ -0,0 +1,24 @@
+package wclayer
+
+import (
+ "context"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
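+//
+// Editorial note (illustrative sketch, not part of the vendored file):
+// ActivateLayer and DeactivateLayer are used as a pair, typically with
+// GetLayerMountPath (later in this patch) resolving the volume while the
+// layer is mounted. Assuming a read-write layer folder at `path`:
+//
+//	if err := wclayer.ActivateLayer(ctx, path); err != nil {
+//		return err
+//	}
+//	defer wclayer.DeactivateLayer(ctx, path)
+//
+//	mnt, err := wclayer.GetLayerMountPath(ctx, path)
+//	if err != nil {
+//		return err
+//	}
+//	// ... work with the mounted volume at mnt ...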
+func DeactivateLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::DeactivateLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = deactivateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+"- failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go new file mode 100644 index 00000000000..424467ac331 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -0,0 +1,25 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// DestroyLayer will remove the on-disk files representing the layer with the given +// path, including that layer's containing folder, if any. +func DestroyLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::DestroyLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = destroyLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go new file mode 100644 index 00000000000..035c9041e68 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -0,0 +1,140 @@ +package wclayer + +import ( + "context" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "go.opencensus.io/trace" +) + +// ExpandScratchSize expands the size of a layer to at least size bytes. +func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { + title := "hcsshim::ExpandScratchSize" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.Int64Attribute("size", int64(size))) + + err = expandSandboxSize(&stdDriverInfo, path, size) + if err != nil { + return hcserror.New(err, title, "") + } + + // Manually expand the volume now in order to work around bugs in 19H1 and + // prerelease versions of Vb. Remove once this is fixed in Windows. 
+ if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { + err = expandSandboxVolume(ctx, path) + if err != nil { + return err + } + } + return nil +} + +type virtualStorageType struct { + DeviceID uint32 + VendorID [16]byte +} + +type openVersion2 struct { + GetInfoOnly int32 // bool but 4-byte aligned + ReadOnly int32 // bool but 4-byte aligned + ResiliencyGUID [16]byte // GUID +} + +type openVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 openVersion2 +} + +func attachVhd(path string) (syscall.Handle, error) { + var ( + defaultType virtualStorageType + handle syscall.Handle + ) + parameters := openVirtualDiskParameters{Version: 2} + err := openVirtualDisk( + &defaultType, + path, + 0, + 0, + ¶meters, + &handle) + if err != nil { + return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} + } + err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) + if err != nil { + syscall.Close(handle) + return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} + } + return handle, nil +} + +func expandSandboxVolume(ctx context.Context, path string) error { + // Mount the sandbox VHD temporarily. + vhdPath := filepath.Join(path, "sandbox.vhdx") + vhd, err := attachVhd(vhdPath) + if err != nil { + return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} + } + defer syscall.Close(vhd) + + // Open the volume. + volumePath, err := GetLayerMountPath(ctx, path) + if err != nil { + return err + } + if volumePath[len(volumePath)-1] == '\\' { + volumePath = volumePath[:len(volumePath)-1] + } + volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) + if err != nil { + return err + } + defer volume.Close() + + // Get the volume's underlying partition size in NTFS clusters. + var ( + partitionSize int64 + bytes uint32 + ) + const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) + if err != nil { + return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} + } + const ( + clusterSize = 4096 + sectorSize = 512 + ) + targetClusters := partitionSize / clusterSize + + // Get the volume's current size in NTFS clusters. + var volumeSize int64 + err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) + if err != nil { + return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} + } + volumeClusters := volumeSize / clusterSize + + // Only resize the volume if there is space to grow, otherwise this will + // fail with invalid parameter. NTFS reserves one cluster. 
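+ // Editorial note (illustrative sketch, not part of the vendored file):
+ // the conversion below moves between bytes, clusters, and sectors. With
+ // the constants above (4096-byte clusters, 512-byte sectors), a 20 GiB
+ // partition works out to
+ //
+ //	targetClusters = 21474836480 / 4096 = 5242880
+ //	targetSectors  = 5242880 * (4096 / 512) = 41943040
+ //
+ // and FSCTL_EXTEND_VOLUME receives the new size in sectors.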
+ if volumeClusters+1 < targetClusters { + targetSectors := targetClusters * (clusterSize / sectorSize) + const _FSCTL_EXTEND_VOLUME = 0x000900F0 + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) + if err != nil { + return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} + } + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go new file mode 100644 index 00000000000..97b27eb7d6b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -0,0 +1,94 @@ +package wclayer + +import ( + "context" + "io/ioutil" + "os" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ExportLayer will create a folder at exportFolderPath and fill that folder with +// the transport format version of the layer identified by layerId. This transport +// format includes any metadata required for later importing the layer (using +// ImportLayer), and requires the full list of parent layer paths in order to +// perform the export. +func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ExportLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("exportFolderPath", exportFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} + +type LayerReader interface { + Next() (string, int64, *winio.FileBasicInfo, error) + Read(b []byte) (int, error) + Close() error +} + +// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. +// The caller must have taken the SeBackupPrivilege privilege +// to call this and any methods on the resulting LayerReader. 
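+//
+// Editorial note (illustrative sketch, not part of the vendored file): a
+// LayerReader is drained by calling Next until io.EOF and reading each
+// entry's Win32 backup stream in between. A nil FileBasicInfo marks a
+// tombstone (a path deleted relative to the parent layer). Assuming
+// `layerPath` and `parents` are already known:
+//
+//	r, err := wclayer.NewLayerReader(ctx, layerPath, parents)
+//	if err != nil {
+//		return err
+//	}
+//	defer r.Close()
+//	for {
+//		name, size, info, err := r.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		if info == nil {
+//			continue // tombstone: record the deletion of name
+//		}
+//		_, _ = name, size
+//		if _, err := io.Copy(io.Discard, r); err != nil { // backup stream bytes
+//			return err
+//		}
+//	}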
+func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + exportPath, err := ioutil.TempDir("", "hcs") + if err != nil { + return nil, err + } + err = ExportLayer(ctx, path, exportPath, parentLayerPaths) + if err != nil { + os.RemoveAll(exportPath) + return nil, err + } + return &legacyLayerReaderWrapper{ + ctx: ctx, + s: span, + legacyLayerReader: newLegacyLayerReader(exportPath), + }, nil +} + +type legacyLayerReaderWrapper struct { + ctx context.Context + s *trace.Span + + *legacyLayerReader +} + +func (r *legacyLayerReaderWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + + err = r.legacyLayerReader.Close() + os.RemoveAll(r.root) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go new file mode 100644 index 00000000000..8d213f5871a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -0,0 +1,50 @@ +package wclayer + +import ( + "context" + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GetLayerMountPath will look for a mounted layer with the given path and return +// the path at which that layer can be accessed. This path may be a volume path +// if the layer is a mounted read-write layer, otherwise it is expected to be the +// folder path at which the layer is stored. +func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { + title := "hcsshim::GetLayerMountPath" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + var mountPathLength uintptr = 0 + + // Call the procedure itself. + log.G(ctx).Debug("Calling proc (1)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) + if err != nil { + return "", hcserror.New(err, title, "(first call)") + } + + // Allocate a mount path of the returned length. 
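+ // Editorial note (illustrative sketch, not part of the vendored file):
+ // this is the classic Win32 call-twice pattern. The first call passes a
+ // nil buffer and receives the required length; the caller allocates that
+ // many UTF-16 code units and repeats the call. In outline:
+ //
+ //	var n uintptr
+ //	getLayerMountPath(&stdDriverInfo, path, &n, nil) // n = required length
+ //	buf := make([]uint16, n)
+ //	getLayerMountPath(&stdDriverInfo, path, &n, &buf[0])
+ //	mountPath := syscall.UTF16ToString(buf)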
+ if mountPathLength == 0 { + return "", nil + } + mountPathp := make([]uint16, mountPathLength) + mountPathp[0] = 0 + + // Call the procedure again + log.G(ctx).Debug("Calling proc (2)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) + if err != nil { + return "", hcserror.New(err, title, "(second call)") + } + + mountPath := syscall.UTF16ToString(mountPathp[0:]) + span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) + return mountPath, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go new file mode 100644 index 00000000000..ae1fff84036 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -0,0 +1,29 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GetSharedBaseImages will enumerate the images stored in the common central +// image store and return descriptive info about those images for the purpose +// of registering them with the graphdriver, graph, and tagstore. +func GetSharedBaseImages(ctx context.Context) (_ string, err error) { + title := "hcsshim::GetSharedBaseImages" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var buffer *uint16 + err = getBaseImages(&buffer) + if err != nil { + return "", hcserror.New(err, title, "") + } + imageData := interop.ConvertAndFreeCoTaskMemString(buffer) + span.AddAttributes(trace.StringAttribute("imageData", imageData)) + return imageData, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go new file mode 100644 index 00000000000..4b282fef9db --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -0,0 +1,26 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GrantVmAccess adds access to a file for a given VM +func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { + title := "hcsshim::GrantVmAccess" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("vm-id", vmid), + trace.StringAttribute("path", filepath)) + + err = grantVmAccess(vmid, filepath) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go new file mode 100644 index 00000000000..687550f0be3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -0,0 +1,166 @@ +package wclayer + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "go.opencensus.io/trace" +) + +// ImportLayer will take the contents of the folder 
at importFolderPath and import
+// that into a layer with the id layerId. Note that in order to correctly populate
+// the layer and interpret the transport format, all parent layers must already
+// be present on the system at the paths provided in parentLayerPaths.
+func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) {
+ title := "hcsshim::ImportLayer"
+ ctx, span := trace.StartSpan(ctx, title)
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(
+ trace.StringAttribute("path", path),
+ trace.StringAttribute("importFolderPath", importFolderPath),
+ trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))
+
+ // Generate layer descriptors
+ layers, err := layerPathsToDescriptors(ctx, parentLayerPaths)
+ if err != nil {
+ return err
+ }
+
+ err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
+ if err != nil {
+ return hcserror.New(err, title, "")
+ }
+ return nil
+}
+
+// LayerWriter is an interface that supports writing a new container image layer.
+type LayerWriter interface {
+ // Add adds a file to the layer with given metadata.
+ Add(name string, fileInfo *winio.FileBasicInfo) error
+ // AddLink adds a hard link to the layer. The target must already have been added.
+ AddLink(name string, target string) error
+ // Remove removes a file that was present in a parent layer from the layer.
+ Remove(name string) error
+ // Write writes data to the current file. The data must be in the format of a Win32
+ // backup stream.
+ Write(b []byte) (int, error)
+ // Close finishes the layer writing process and releases any resources.
+ Close() error
+}
+
+type legacyLayerWriterWrapper struct {
+ ctx context.Context
+ s *trace.Span
+
+ *legacyLayerWriter
+ path string
+ parentLayerPaths []string
+}
+
+func (r *legacyLayerWriterWrapper) Close() (err error) {
+ defer r.s.End()
+ defer func() { oc.SetSpanStatus(r.s, err) }()
+ defer os.RemoveAll(r.root.Name())
+ defer r.legacyLayerWriter.CloseRoots()
+
+ err = r.legacyLayerWriter.Close()
+ if err != nil {
+ return err
+ }
+
+ if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
+ return err
+ }
+ for _, name := range r.Tombstones {
+ if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ // Add any hard links that were collected.
+ for _, lnk := range r.PendingLinks {
+ if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil {
+ return err
+ }
+ }
+
+ // reapplyDirectoryTimes must be called AFTER we are done with Tombstone
+ // deletion and hard link creation. This is because Tombstone deletion and hard link
+ // creation update the directory last write timestamps, which would change the
+ // timestamps added by the `Add` call. Some container applications depend on the
+ // correctness of these timestamps, and so we should change the timestamps back to
+ // the original value (i.e. the value provided in the Add call) after this
+ // processing is done.
+ err = reapplyDirectoryTimes(r.destRoot, r.changedDi)
+ if err != nil {
+ return err
+ }
+
+ // Prepare the utility VM for use if one is present in the layer.
+ if r.HasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) + if err != nil { + return err + } + err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) + if err != nil { + return err + } + } + return nil +} + +// NewLayerWriter returns a new layer writer for creating a layer on disk. +// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges +// to call this and any methods on the resulting LayerWriter. +func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + if len(parentLayerPaths) == 0 { + // This is a base layer. It gets imported differently. + f, err := safefile.OpenRoot(path) + if err != nil { + return nil, err + } + return &baseLayerWriter{ + ctx: ctx, + s: span, + root: f, + }, nil + } + + importPath, err := ioutil.TempDir("", "hcs") + if err != nil { + return nil, err + } + w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) + if err != nil { + return nil, err + } + return &legacyLayerWriterWrapper{ + ctx: ctx, + s: span, + legacyLayerWriter: w, + path: importPath, + parentLayerPaths: parentLayerPaths, + }, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go new file mode 100644 index 00000000000..01e67233939 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -0,0 +1,28 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// LayerExists will return true if a layer with the given id exists and is known +// to the system. +func LayerExists(ctx context.Context, path string) (_ bool, err error) { + title := "hcsshim::LayerExists" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + // Call the procedure itself. + var exists uint32 + err = layerExists(&stdDriverInfo, path, &exists) + if err != nil { + return false, hcserror.New(err, title, "") + } + span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) + return exists != 0, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go new file mode 100644 index 00000000000..0ce34a30f86 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -0,0 +1,22 @@ +package wclayer + +import ( + "context" + "path/filepath" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// LayerID returns the layer ID of a layer on disk. 
+func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { + title := "hcsshim::LayerID" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + _, file := filepath.Split(path) + return NameToGuid(ctx, file) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go new file mode 100644 index 00000000000..1ec893c6af7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -0,0 +1,97 @@ +package wclayer + +// This file contains utility functions to support storage (graph) related +// functionality. + +import ( + "context" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/sirupsen/logrus" +) + +/* To pass into syscall, we need a struct matching the following: +enum GraphDriverType +{ + DiffDriver, + FilterDriver +}; + +struct DriverInfo { + GraphDriverType Flavour; + LPCWSTR HomeDir; +}; +*/ + +type driverInfo struct { + Flavour int + HomeDirp *uint16 +} + +var ( + utf16EmptyString uint16 + stdDriverInfo = driverInfo{1, &utf16EmptyString} +) + +/* To pass into syscall, we need a struct matching the following: +typedef struct _WC_LAYER_DESCRIPTOR { + + // + // The ID of the layer + // + + GUID LayerId; + + // + // Additional flags + // + + union { + struct { + ULONG Reserved : 31; + ULONG Dirty : 1; // Created from sandbox as a result of snapshot + }; + ULONG Value; + } Flags; + + // + // Path to the layer root directory, null-terminated + // + + PCWSTR Path; + +} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; +*/ +type WC_LAYER_DESCRIPTOR struct { + LayerId guid.GUID + Flags uint32 + Pathp *uint16 +} + +func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { + // Array of descriptors that gets constructed. 
+ var layers []WC_LAYER_DESCRIPTOR + + for i := 0; i < len(parentLayerPaths); i++ { + g, err := LayerID(ctx, parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed to convert name to guid") + return nil, err + } + + p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") + return nil, err + } + + layers = append(layers, WC_LAYER_DESCRIPTOR{ + LayerId: g, + Flags: 0, + Pathp: p, + }) + } + + return layers, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go new file mode 100644 index 00000000000..b7f3064f26b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -0,0 +1,811 @@ +package wclayer + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" +) + +var errorIterationCanceled = errors.New("") + +var mutatedUtilityVMFiles = map[string]bool{ + `EFI\Microsoft\Boot\BCD`: true, + `EFI\Microsoft\Boot\BCD.LOG`: true, + `EFI\Microsoft\Boot\BCD.LOG1`: true, + `EFI\Microsoft\Boot\BCD.LOG2`: true, +} + +const ( + filesPath = `Files` + hivesPath = `Hives` + utilityVMPath = `UtilityVM` + utilityVMFilesPath = `UtilityVM\Files` +) + +func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { + return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) +} + +func hasPathPrefix(p, prefix string) bool { + return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' +} + +type fileEntry struct { + path string + fi os.FileInfo + err error +} + +type legacyLayerReader struct { + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +// newLegacyLayerReader returns a new LayerReader that can read the Windows +// container layer transport format from disk. +func newLegacyLayerReader(root string) *legacyLayerReader { + r := &legacyLayerReader{ + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func readTombstones(path string) (map[string]([]string), error) { + tf, err := os.Open(filepath.Join(path, "tombstones.txt")) + if err != nil { + return nil, err + } + defer tf.Close() + s := bufio.NewScanner(tf) + if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { + return nil, errors.New("invalid tombstones file") + } + + ts := make(map[string]([]string)) + for s.Scan() { + t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` + dir := filepath.Dir(t) + ts[dir] = append(ts[dir], t) + } + if err = s.Err(); err != nil { + return nil, err + } + + return ts, nil +} + +func (r *legacyLayerReader) walkUntilCancelled() error { + root, err := longpath.LongAbs(r.root) + if err != nil { + return err + } + + r.root = root + ts, err := readTombstones(r.root) + if err != nil { + return err + } + + err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. 
+ // Handle failure from what may be a golang bug in the conversion of + // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat + // which is called by filepath.Walk will fail when a filename contains + // unicode characters. Skip the recycle bin regardless which is goodness. + if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { + return filepath.SkipDir + } + + if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + // List all the tombstones. + if info.IsDir() { + relPath, err := filepath.Rel(r.root, path) + if err != nil { + return err + } + if dts, ok := ts[relPath]; ok { + for _, t := range dts { + r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} + if !<-r.proceed { + return errorIterationCanceled + } + } + } + } + return nil + }) + if err == errorIterationCanceled { + return nil + } + if err == nil { + return io.EOF + } + return err +} + +func (r *legacyLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *legacyLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func findBackupStreamSize(r io.Reader) (int64, error) { + br := winio.NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return 0, err + } + if hdr.Id == winio.BackupData { + return hdr.Size, nil + } + } +} + +func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("LegacyLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + if fe.fi == nil { + // This is a tombstone. Return a nil fileInfo. + return + } + + if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { + fe.path += ".$wcidirs$" + } + + f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) + if err != nil { + return + } + defer func() { + if f != nil { + f.Close() + } + }() + + fileInfo, err = winio.GetFileBasicInfo(f) + if err != nil { + return + } + + if !hasPathPrefix(path, filesPath) { + size = fe.fi.Size() + r.backupReader = winio.NewBackupFileReader(f, false) + if path == hivesPath || path == filesPath { + // The Hives directory has a non-deterministic file time because of the + // nature of the import process. Use the times from System_Delta. + var g *os.File + g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) + if err != nil { + return + } + attr := fileInfo.FileAttributes + fileInfo, err = winio.GetFileBasicInfo(g) + g.Close() + if err != nil { + return + } + fileInfo.FileAttributes = attr + } + + // The creation time and access time get reset for files outside of the Files path. + fileInfo.CreationTime = fileInfo.LastWriteTime + fileInfo.LastAccessTime = fileInfo.LastWriteTime + + } else { + // The file attributes are written before the backup stream. 
+ var attr uint32
+ err = binary.Read(f, binary.LittleEndian, &attr)
+ if err != nil {
+ return
+ }
+ fileInfo.FileAttributes = attr
+ beginning := int64(4)
+
+ // Find the accurate file size.
+ if !fe.fi.IsDir() {
+ size, err = findBackupStreamSize(f)
+ if err != nil {
+ err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
+ return
+ }
+ }
+
+ // Return back to the beginning of the backup stream.
+ _, err = f.Seek(beginning, 0)
+ if err != nil {
+ return
+ }
+ }
+
+ r.currentFile = f
+ f = nil
+ return
+}
+
+func (r *legacyLayerReader) Read(b []byte) (int, error) {
+ if r.backupReader == nil {
+ if r.currentFile == nil {
+ return 0, io.EOF
+ }
+ return r.currentFile.Read(b)
+ }
+ return r.backupReader.Read(b)
+}
+
+func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
+ if r.backupReader == nil {
+ if r.currentFile == nil {
+ return 0, errors.New("no current file")
+ }
+ return r.currentFile.Seek(offset, whence)
+ }
+ return 0, errors.New("seek not supported on this stream")
+}
+
+func (r *legacyLayerReader) Close() error {
+ r.proceed <- false
+ <-r.result
+ r.reset()
+ return nil
+}
+
+type pendingLink struct {
+ Path, Target string
+ TargetRoot *os.File
+}
+
+type pendingDir struct {
+ Path string
+ Root *os.File
+}
+
+type legacyLayerWriter struct {
+ root *os.File
+ destRoot *os.File
+ parentRoots []*os.File
+ currentFile *os.File
+ bufWriter *bufio.Writer
+ currentFileName string
+ currentFileRoot *os.File
+ backupWriter *winio.BackupFileWriter
+ Tombstones []string
+ HasUtilityVM bool
+ changedDi []dirInfo
+ addedFiles map[string]bool
+ PendingLinks []pendingLink
+ pendingDirs []pendingDir
+ currentIsDir bool
+}
+
+// newLegacyLayerWriter returns a LayerWriter that can write the container layer
+// transport format to disk.
+func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) {
+ w = &legacyLayerWriter{
+ addedFiles: make(map[string]bool),
+ }
+ defer func() {
+ if err != nil {
+ w.CloseRoots()
+ w = nil
+ }
+ }()
+ w.root, err = safefile.OpenRoot(root)
+ if err != nil {
+ return
+ }
+ w.destRoot, err = safefile.OpenRoot(destRoot)
+ if err != nil {
+ return
+ }
+ for _, r := range parentRoots {
+ f, err := safefile.OpenRoot(r)
+ if err != nil {
+ return w, err
+ }
+ w.parentRoots = append(w.parentRoots, f)
+ }
+ w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536)
+ return
+}
+
+func (w *legacyLayerWriter) CloseRoots() {
+ if w.root != nil {
+ w.root.Close()
+ w.root = nil
+ }
+ if w.destRoot != nil {
+ w.destRoot.Close()
+ w.destRoot = nil
+ }
+ for i := range w.parentRoots {
+ _ = w.parentRoots[i].Close()
+ }
+ w.parentRoots = nil
+}
+
+func (w *legacyLayerWriter) initUtilityVM() error {
+ if !w.HasUtilityVM {
+ err := safefile.MkdirRelative(utilityVMPath, w.destRoot)
+ if err != nil {
+ return err
+ }
+ // Server 2016 does not support multiple layers for the utility VM, so
+ // clone the utility VM from the parent layer into this layer. Use hard
+ // links to avoid unnecessary copying, since most of the files are
+ // immutable.
+ err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) + if err != nil { + return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + } + w.HasUtilityVM = true + } + return nil +} + +func (w *legacyLayerWriter) reset() error { + err := w.bufWriter.Flush() + if err != nil { + return err + } + w.bufWriter.Reset(ioutil.Discard) + if w.currentIsDir { + r := w.currentFile + br := winio.NewBackupStreamReader(r) + // Seek to the beginning of the backup stream, skipping the fileattrs + if _, err := r.Seek(4, io.SeekStart); err != nil { + return err + } + + for { + bhdr, err := br.Next() + if err == io.EOF { + // end of backupstream data + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupReparseData: + // The current file is a `.$wcidirs$` metadata file that + // describes a directory reparse point. Delete the placeholder + // directory to prevent future files being added into the + // destination of the reparse point during the ImportLayer call + if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { + return err + } + w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) + default: + // ignore all other stream types, as we only care about directory reparse points + } + } + w.currentIsDir = false + } + if w.backupWriter != nil { + w.backupWriter.Close() + w.backupWriter = nil + } + if w.currentFile != nil { + w.currentFile.Close() + w.currentFile = nil + w.currentFileName = "" + w.currentFileRoot = nil + } + return nil +} + +// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata +func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { + src, err := safefile.OpenRelative( + subPath, + srcRoot, + syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, err + } + defer src.Close() + srcr := winio.NewBackupFileReader(src, true) + defer srcr.Close() + + fileInfo, err = winio.GetFileBasicInfo(src) + if err != nil { + return nil, err + } + + extraFlags := uint32(0) + if isDir { + extraFlags |= winapi.FILE_DIRECTORY_FILE + } + dest, err := safefile.OpenRelative( + subPath, + destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + winapi.FILE_CREATE, + extraFlags) + if err != nil { + return nil, err + } + defer dest.Close() + + err = winio.SetFileBasicInfo(dest, fileInfo) + if err != nil { + return nil, err + } + + destw := winio.NewBackupFileWriter(dest, true) + defer func() { + cerr := destw.Close() + if err == nil { + err = cerr + } + }() + + _, err = io.Copy(destw, srcr) + if err != nil { + return nil, err + } + + return fileInfo, nil +} + +// cloneTree clones a directory tree using hard links. It skips hard links for +// the file names in the provided map and just copies those files. 
+func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { + var di []dirInfo + err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) + if err != nil { + return err + } + err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) + if err != nil { + return err + } + + fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes + // Directories, reparse points, and files that will be mutated during + // utility VM import must be copied. All other files can be hard linked. + isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 + // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. + // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc + // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly + isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 + + if isDir || isReparsePoint || mutatedFiles[relPath] { + fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) + if err != nil { + return err + } + if isDir { + di = append(di, dirInfo{path: relPath, fileInfo: *fi}) + } + } else { + err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + return reapplyDirectoryTimes(destRoot, di) +} + +func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { + if err := w.reset(); err != nil { + return err + } + + if name == utilityVMPath { + return w.initUtilityVM() + } + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) + } + + name = filepath.Clean(name) + if hasPathPrefix(name, utilityVMPath) { + if !w.HasUtilityVM { + return errors.New("missing UtilityVM directory") + } + if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { + return errors.New("invalid UtilityVM layer") + } + createDisposition := uint32(winapi.FILE_OPEN) + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + st, err := safefile.LstatRelative(name, w.destRoot) + if err != nil && !os.IsNotExist(err) { + return err + } + if st != nil { + // Delete the existing file/directory if it is not the same type as this directory. + existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { + return err + } + st = nil + } + } + if st == nil { + if err = safefile.MkdirRelative(name, w.destRoot); err != nil { + return err + } + } + } else { + // Overwrite any existing hard link. 
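+			// (A "not exist" error is expected here when there is nothing to
+			// overwrite; any other removal error is fatal.)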
+			err := safefile.RemoveRelative(name, w.destRoot)
+			if err != nil && !os.IsNotExist(err) {
+				return err
+			}
+			createDisposition = winapi.FILE_CREATE
+		}
+
+		f, err := safefile.OpenRelative(
+			name,
+			w.destRoot,
+			syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
+			syscall.FILE_SHARE_READ,
+			createDisposition,
+			winapi.FILE_OPEN_REPARSE_POINT,
+		)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			if f != nil {
+				f.Close()
+				_ = safefile.RemoveRelative(name, w.destRoot)
+			}
+		}()
+
+		err = winio.SetFileBasicInfo(f, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.backupWriter = winio.NewBackupFileWriter(f, true)
+		w.bufWriter.Reset(w.backupWriter)
+		w.currentFile = f
+		w.currentFileName = name
+		w.currentFileRoot = w.destRoot
+		w.addedFiles[name] = true
+		f = nil
+		return nil
+	}
+
+	fname := name
+	if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+		err := safefile.MkdirRelative(name, w.root)
+		if err != nil {
+			return err
+		}
+		fname += ".$wcidirs$"
+		w.currentIsDir = true
+	}
+
+	f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if f != nil {
+			f.Close()
+			_ = safefile.RemoveRelative(fname, w.root)
+		}
+	}()
+
+	strippedFi := *fileInfo
+	strippedFi.FileAttributes = 0
+	err = winio.SetFileBasicInfo(f, &strippedFi)
+	if err != nil {
+		return err
+	}
+
+	if hasPathPrefix(name, hivesPath) {
+		w.backupWriter = winio.NewBackupFileWriter(f, false)
+		w.bufWriter.Reset(w.backupWriter)
+	} else {
+		w.bufWriter.Reset(f)
+		// The file attributes are written before the stream.
+		err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes))
+		if err != nil {
+			w.bufWriter.Reset(ioutil.Discard)
+			return err
+		}
+	}
+
+	w.currentFile = f
+	w.currentFileName = name
+	w.currentFileRoot = w.root
+	w.addedFiles[name] = true
+	f = nil
+	return nil
+}
+
+func (w *legacyLayerWriter) AddLink(name string, target string) error {
+	if err := w.reset(); err != nil {
+		return err
+	}
+
+	target = filepath.Clean(target)
+	var roots []*os.File
+	if hasPathPrefix(target, filesPath) {
+		// Look for cross-layer hard link targets in the parent layers, since
+		// nothing is in the destination path yet.
+		roots = w.parentRoots
+	} else if hasPathPrefix(target, utilityVMFilesPath) {
+		// Since the utility VM is fully cloned into the destination path
+		// already, look for cross-layer hard link targets directly in the
+		// destination path.
+		roots = []*os.File{w.destRoot}
+	}
+
+	if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
+		return errors.New("invalid hard link in layer")
+	}
+
+	// Try to find the target of the link in a previously added file. If that
+	// fails, search in parent layers.
+	var selectedRoot *os.File
+	if _, ok := w.addedFiles[target]; ok {
+		selectedRoot = w.destRoot
+	} else {
+		for _, r := range roots {
+			if _, err := safefile.LstatRelative(target, r); err != nil {
+				if !os.IsNotExist(err) {
+					return err
+				}
+			} else {
+				selectedRoot = r
+				break
+			}
+		}
+		if selectedRoot == nil {
+			return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
+		}
+	}
+
+	// The link can't be written until after the ImportLayer call.
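+	// Queue it as a pending link; the caller is expected to create the actual
+	// hard links (relative to TargetRoot) once the import has finished.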
+	w.PendingLinks = append(w.PendingLinks, pendingLink{
+		Path:       name,
+		Target:     target,
+		TargetRoot: selectedRoot,
+	})
+	w.addedFiles[name] = true
+	return nil
+}
+
+func (w *legacyLayerWriter) Remove(name string) error {
+	name = filepath.Clean(name)
+	if hasPathPrefix(name, filesPath) {
+		w.Tombstones = append(w.Tombstones, name)
+	} else if hasPathPrefix(name, utilityVMFilesPath) {
+		err := w.initUtilityVM()
+		if err != nil {
+			return err
+		}
+		// Make sure the path exists; os.RemoveAll will not fail if the file is
+		// already gone, and this needs to be a fatal error for diagnostic
+		// purposes.
+		if _, err := safefile.LstatRelative(name, w.destRoot); err != nil {
+			return err
+		}
+		err = safefile.RemoveAllRelative(name, w.destRoot)
+		if err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("invalid tombstone %s", name)
+	}
+
+	return nil
+}
+
+func (w *legacyLayerWriter) Write(b []byte) (int, error) {
+	if w.backupWriter == nil && w.currentFile == nil {
+		return 0, errors.New("closed")
+	}
+	return w.bufWriter.Write(b)
+}
+
+func (w *legacyLayerWriter) Close() error {
+	if err := w.reset(); err != nil {
+		return err
+	}
+	if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	for _, pd := range w.pendingDirs {
+		err := safefile.MkdirRelative(pd.Path, pd.Root)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
new file mode 100644
index 00000000000..09950297cef
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -0,0 +1,29 @@
+package wclayer
+
+import (
+	"context"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"go.opencensus.io/trace"
+)
+
+// NameToGuid converts the given string into a GUID using the algorithm in the
+// Host Compute Service, ensuring GUIDs generated with the same string are common
+// across all clients.
+func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
+	title := "hcsshim::NameToGuid"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("objectName", name))
+
+	var id guid.GUID
+	err = nameToGuid(name, &id)
+	if err != nil {
+		return guid.GUID{}, hcserror.New(err, title, "")
+	}
+	span.AddAttributes(trace.StringAttribute("guid", id.String()))
+	return id, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
new file mode 100644
index 00000000000..90129faefbb
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -0,0 +1,44 @@
+package wclayer
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"go.opencensus.io/trace"
+)
+
+var prepareLayerLock sync.Mutex
+
+// PrepareLayer finds a mounted read-write layer matching path and enables the
+// filesystem filter for use on that layer. This requires the paths to all
+// parent layers, and is necessary in order to view or interact with the layer
+// as an actual filesystem (reading and writing files, creating directories, etc).
+// Disabling the filter must be done via UnprepareLayer. +func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { + title := "hcsshim::PrepareLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + // This lock is a temporary workaround for a Windows bug. Only allowing one + // call to prepareLayer at a time vastly reduces the chance of a timeout. + prepareLayerLock.Lock() + defer prepareLayerLock.Unlock() + err = prepareLayer(&stdDriverInfo, path, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go new file mode 100644 index 00000000000..30bcdff5f55 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -0,0 +1,41 @@ +package wclayer + +import ( + "context" + "os" + + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ProcessBaseLayer post-processes a base layer that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessBaseLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processBaseImage(path) + if err != nil { + return &os.PathError{Op: title, Path: path, Err: err} + } + return nil +} + +// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessUtilityVMImage" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processUtilityImage(path) + if err != nil { + return &os.PathError{Op: title, Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go new file mode 100644 index 00000000000..71b130c525f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -0,0 +1,25 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// UnprepareLayer disables the filesystem filter for the read-write layer with +// the given id. 
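+// It is the inverse of PrepareLayer and is typically called once the layer no
+// longer needs to be accessed as a filesystem.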
+func UnprepareLayer(ctx context.Context, path string) (err error) {
+	title := "hcsshim::UnprepareLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("path", path))
+
+	err = unprepareLayer(&stdDriverInfo, path)
+	if err != nil {
+		return hcserror.New(err, title, "")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go
new file mode 100644
index 00000000000..9b1e06d50c5
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go
@@ -0,0 +1,35 @@
+// Package wclayer provides bindings to HCS's legacy layer management API, plus
+// a higher-level interface around these calls for container layer management.
+package wclayer
+
+import "github.com/Microsoft/go-winio/pkg/guid"
+
+//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go
+
+//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
+//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
+//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
+//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
+//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
+//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
+//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
+//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
+//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
+//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
+//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
+//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
+//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid?
+//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
+//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
+//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
+//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
+
+//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess?
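+
+// Note: the trailing "?" on the vmcompute bindings above marks the export as
+// optional; mksyscall then emits a Find() check (visible in the generated
+// zsyscall_windows.go below) so a missing API surfaces as an error instead of
+// a panic at call time.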
+ +//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk + +//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW + +type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go new file mode 100644 index 00000000000..67f917f07e6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -0,0 +1,569 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package wclayer + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procActivateLayer = modvmcompute.NewProc("ActivateLayer") + procCopyLayer = modvmcompute.NewProc("CopyLayer") + procCreateLayer = modvmcompute.NewProc("CreateLayer") + procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") + procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") + procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") + procDestroyLayer = modvmcompute.NewProc("DestroyLayer") + procExportLayer = modvmcompute.NewProc("ExportLayer") + procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") + procGetBaseImages = modvmcompute.NewProc("GetBaseImages") + procImportLayer = modvmcompute.NewProc("ImportLayer") + procLayerExists = modvmcompute.NewProc("LayerExists") + procNameToGuid = modvmcompute.NewProc("NameToGuid") + procPrepareLayer = modvmcompute.NewProc("PrepareLayer") + procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") + procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") + procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") + procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") +) + +func activateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _activateLayer(info, _p0) +} + +func _activateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procActivateLayer.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(srcId) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(dstId) + if hr != nil { + return + } + return _copyLayer(info, _p0, _p1, descriptors) +} + +func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procCopyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createLayer(info *driverInfo, id string, parent string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(parent) + if hr != nil { + return + } + return _createLayer(info, _p0, _p1) +} + +func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { + if hr = procCreateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _createSandboxLayer(info, _p0, parent, descriptors) +} + +func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procCreateSandboxLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _expandSandboxSize(info, _p0, size) +} + +func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { + if hr = procExpandSandboxSize.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func deactivateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _deactivateLayer(info, _p0) 
+} + +func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDeactivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func destroyLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _destroyLayer(info, _p0) +} + +func _destroyLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDestroyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _exportLayer(info, _p0, _p1, descriptors) +} + +func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _getLayerMountPath(info, _p0, length, buffer) +} + +func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { + if hr = procGetLayerMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getBaseImages(buffer **uint16) (hr error) { + if hr = procGetBaseImages.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _importLayer(info, _p0, _p1, descriptors) +} + +func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procImportLayer.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _layerExists(info, _p0, exists) +} + +func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { + if hr = procLayerExists.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func nameToGuid(name string, guid *_guid) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(name) + if hr != nil { + return + } + return _nameToGuid(_p0, guid) +} + +func _nameToGuid(name *uint16, guid *_guid) (hr error) { + if hr = procNameToGuid.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _prepareLayer(info, _p0, descriptors) +} + +func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procPrepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func unprepareLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _unprepareLayer(info, _p0) +} + +func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procUnprepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processBaseImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processBaseImage(_p0) +} + +func _processBaseImage(path *uint16) (hr error) { + if hr = procProcessBaseImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processUtilityImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processUtilityImage(_p0) +} + +func _processUtilityImage(path 
*uint16) (hr error) { + if hr = procProcessUtilityImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func grantVmAccess(vmid string, filepath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(vmid) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(filepath) + if hr != nil { + return + } + return _grantVmAccess(_p0, _p1) +} + +func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { + if hr = procGrantVmAccess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(directoryName) + if err != nil { + return + } + return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) +} + +func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go new file mode 100644 index 00000000000..def9525417e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -0,0 +1,44 @@ 
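+// This file wraps the Windows pseudo console (ConPTY) API. CreatePseudoConsole
+// and ResizePseudoConsole take a COORD by value, so the wrappers below pack the
+// struct into a uint32 before crossing the syscall boundary.
+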
+package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) +} + +// HRESULT WINAPI CreatePseudoConsole( +// _In_ COORD size, +// _In_ HANDLE hInput, +// _In_ HANDLE hOutput, +// _In_ DWORD dwFlags, +// _Out_ HPCON* phPC +// ); +// +//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole + +// void WINAPI ClosePseudoConsole( +// _In_ HPCON hPC +// ); +// +//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole + +// HRESULT WINAPI ResizePseudoConsole( +// _In_ HPCON hPC , +// _In_ COORD size +// ); +// +//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go new file mode 100644 index 00000000000..df28ea24216 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -0,0 +1,13 @@ +package winapi + +import "github.com/Microsoft/go-winio/pkg/guid" + +//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA +//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA +//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW +//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW + +type DevPropKey struct { + Fmtid guid.GUID + Pid uint32 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go new file mode 100644 index 00000000000..4e80ef68c92 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -0,0 +1,15 @@ +package winapi + +import "syscall" + +//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError + +const ( + STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B + ERROR_NO_MORE_ITEMS = 0x103 + ERROR_MORE_DATA syscall.Errno = 234 +) + +func NTSuccess(status uint32) bool { + return status == 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go new file mode 100644 index 00000000000..7ce52afd5e1 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -0,0 +1,110 @@ +package winapi + +//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile + +//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject +//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject + +const ( + FileLinkInformationClass = 11 + FileDispositionInformationExClass = 64 + + FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + OBJ_DONT_REPARSE = 0x1000 + + STATUS_MORE_ENTRIES = 0x105 + STATUS_NO_MORE_ENTRIES = 0x8000001a +) + +// Select entries from FILE_INFO_BY_HANDLE_CLASS. +// +// C declaration: +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class +const ( + FileIdInfo = 18 +) + +type FileDispositionInformationEx struct { + Flags uintptr +} + +type IOStatusBlock struct { + Status, Information uintptr +} + +type ObjectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *UnicodeString + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type ObjectDirectoryInformation struct { + Name UnicodeString + TypeName UnicodeString +} + +type FileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +// C declaration: +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info +type FILE_ID_INFO struct { + VolumeSerialNumber uint64 + FileID [16]byte +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go new file mode 100644 index 00000000000..4e609cbf1cd --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go @@ -0,0 +1,3 @@ +package winapi + +//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go new file mode 100644 index 00000000000..ba12b1ad92e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -0,0 +1,215 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// Messages that can be received from an assigned io completion port. +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +const ( + JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 + JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 + JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 + JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 + JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 + JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 + JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 + JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 + JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 + JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 +) + +// Access rights for creating or opening job objects. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights +const JOB_OBJECT_ALL_ACCESS = 0x1F001F + +// IO limit flags +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 + +const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +const ( + JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota + JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY + JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE +) + +// JobObjectInformationClass values. 
Used for a call to QueryInformationJobObject +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject +const ( + JobObjectBasicAccountingInformation uint32 = 1 + JobObjectBasicProcessIdList uint32 = 3 + JobObjectBasicAndIoAccountingInformation uint32 = 8 + JobObjectLimitViolationInformation uint32 = 13 + JobObjectMemoryUsageInformation uint32 = 28 + JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectIoAttribution uint32 = 42 +) + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { + ControlFlags uint32 + Value uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { + MaxIops int64 + MaxBandwidth int64 + ReservationIops int64 + BaseIOSize uint32 + VolumeName string + ControlFlags uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list +type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { + NumberOfAssignedProcesses uint32 + NumberOfProcessIdsInList uint32 + ProcessIdList [1]uintptr +} + +// AllPids returns all the process Ids in the job object. +func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information +type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { + TotalUserTime int64 + TotalKernelTime int64 + ThisPeriodTotalUserTime int64 + ThisPeriodTotalKernelTime int64 + TotalPageFaultCount uint32 + TotalProcesses uint32 + ActiveProcesses uint32 + TotalTerminateProcesses uint32 +} + +//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { + BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION + IoInfo windows.IO_COUNTERS +} + +// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; +// +type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { + JobMemory uint64 + PeakJobMemoryUsed uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; +// +type JOBOBJECT_IO_ATTRIBUTION_STATS struct { + IoCount uintptr + TotalNonOverlappedQueueTime uint64 + TotalNonOverlappedServiceTime uint64 + TotalSize uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; +// +type JOBOBJECT_IO_ATTRIBUTION_INFORMATION 
struct { + ControlFlags uint32 + ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS + WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { + CompletionKey windows.Handle + CompletionPort windows.Handle +} + +// BOOL IsProcessInJob( +// HANDLE ProcessHandle, +// HANDLE JobHandle, +// PBOOL Result +// ); +// +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob + +// BOOL QueryInformationJobObject( +// HANDLE hJob, +// JOBOBJECTINFOCLASS JobObjectInformationClass, +// LPVOID lpJobObjectInformation, +// DWORD cbJobObjectInformationLength, +// LPDWORD lpReturnLength +// ); +// +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject + +// HANDLE OpenJobObjectW( +// DWORD dwDesiredAccess, +// BOOL bInheritHandle, +// LPCWSTR lpName +// ); +// +//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW + +// DWORD SetIoRateControlInformationJobObject( +// HANDLE hJob, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo +// ); +// +//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject + +// DWORD QueryIoRateControlInformationJobObject( +// HANDLE hJob, +// PCWSTR VolumeName, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, +// ULONG *InfoBlockCount +// ); +//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject + +// NTSTATUS +// NtOpenJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject + +// NTSTATUS +// NTAPI +// NtCreateJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go new file mode 100644 index 00000000000..b6e7cfd4601 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go @@ -0,0 +1,30 @@ +package winapi + +// BOOL LogonUserA( +// LPCWSTR lpszUsername, +// LPCWSTR lpszDomain, +// LPCWSTR lpszPassword, +// DWORD dwLogonType, +// DWORD dwLogonProvider, +// PHANDLE phToken +// ); +// +//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW + +// Logon types +const ( + LOGON32_LOGON_INTERACTIVE uint32 = 2 + LOGON32_LOGON_NETWORK uint32 = 3 + LOGON32_LOGON_BATCH uint32 = 4 + LOGON32_LOGON_SERVICE uint32 = 5 + LOGON32_LOGON_UNLOCK uint32 = 7 + LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 + 
LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 +) + +// Logon providers +const ( + LOGON32_PROVIDER_DEFAULT uint32 = 0 + LOGON32_PROVIDER_WINNT40 uint32 = 2 + LOGON32_PROVIDER_WINNT50 uint32 = 3 +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go new file mode 100644 index 00000000000..53f62948c90 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -0,0 +1,4 @@ +package winapi + +//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc +//sys LocalFree(ptr uintptr) = kernel32.LocalFree diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go new file mode 100644 index 00000000000..f37910024f7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go @@ -0,0 +1,3 @@ +package winapi + +//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go new file mode 100644 index 00000000000..908920e8722 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -0,0 +1,11 @@ +package winapi + +// DWORD SearchPathW( +// LPCWSTR lpPath, +// LPCWSTR lpFileName, +// LPCWSTR lpExtension, +// DWORD nBufferLength, +// LPWSTR lpBuffer, +// LPWSTR *lpFilePart +// ); +//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go new file mode 100644 index 00000000000..37839435b93 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -0,0 +1,8 @@ +package winapi + +const PROCESS_ALL_ACCESS uint32 = 2097151 + +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go new file mode 100644 index 00000000000..ce79ac2cdb8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go @@ -0,0 +1,7 @@ +package winapi + +// Get count from all processor groups. 
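+// Passing ALL_PROCESSOR_GROUPS to GetActiveProcessorCount returns the number
+// of active logical processors across all groups rather than a single group.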
+// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups +const ALL_PROCESSOR_GROUPS = 0xFFFF + +//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go new file mode 100644 index 00000000000..327f57d7c29 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -0,0 +1,52 @@ +package winapi + +import "golang.org/x/sys/windows" + +const SystemProcessInformation = 5 + +const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 + +// __kernel_entry NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 // ULONG + NumberOfThreads uint32 // ULONG + WorkingSetPrivateSize int64 // LARGE_INTEGER + HardFaultCount uint32 // ULONG + NumberOfThreadsHighWatermark uint32 // ULONG + CycleTime uint64 // ULONGLONG + CreateTime int64 // LARGE_INTEGER + UserTime int64 // LARGE_INTEGER + KernelTime int64 // LARGE_INTEGER + ImageName UnicodeString // UNICODE_STRING + BasePriority int32 // KPRIORITY + UniqueProcessID windows.Handle // HANDLE + InheritedFromUniqueProcessID windows.Handle // HANDLE + HandleCount uint32 // ULONG + SessionID uint32 // ULONG + UniqueProcessKey *uint32 // ULONG_PTR + PeakVirtualSize uintptr // SIZE_T + VirtualSize uintptr // SIZE_T + PageFaultCount uint32 // ULONG + PeakWorkingSetSize uintptr // SIZE_T + WorkingSetSize uintptr // SIZE_T + QuotaPeakPagedPoolUsage uintptr // SIZE_T + QuotaPagedPoolUsage uintptr // SIZE_T + QuotaPeakNonPagedPoolUsage uintptr // SIZE_T + QuotaNonPagedPoolUsage uintptr // SIZE_T + PagefileUsage uintptr // SIZE_T + PeakPagefileUsage uintptr // SIZE_T + PrivatePageCount uintptr // SIZE_T + ReadOperationCount int64 // LARGE_INTEGER + WriteOperationCount int64 // LARGE_INTEGER + OtherOperationCount int64 // LARGE_INTEGER + ReadTransferCount int64 // LARGE_INTEGER + WriteTransferCount int64 // LARGE_INTEGER + OtherTransferCount int64 // LARGE_INTEGER +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go new file mode 100644 index 00000000000..4724713e3e4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -0,0 +1,12 @@ +package winapi + +// HANDLE CreateRemoteThread( +// HANDLE hProcess, +// LPSECURITY_ATTRIBUTES lpThreadAttributes, +// SIZE_T dwStackSize, +// LPTHREAD_START_ROUTINE lpStartAddress, +// LPVOID lpParameter, +// DWORD dwCreationFlags, +// LPDWORD lpThreadId +// ); +//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go new file mode 100644 index 00000000000..859b753c246 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -0,0 +1,80 @@ +package winapi + +import ( + 
"errors" + "reflect" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice +// for easier interop with Go APIs +func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) + hdr.Data = uintptr(unsafe.Pointer(buffer)) + hdr.Cap = bufferLength + hdr.Len = bufferLength + + return +} + +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string +type UnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + +//String converts a UnicodeString to a golang string +func (uni UnicodeString) String() string { + // UnicodeString is not guaranteed to be null terminated, therefore + // use the UnicodeString's Length field + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) +} + +// NewUnicodeString allocates a new UnicodeString and copies `s` into +// the buffer of the new UnicodeString. +func NewUnicodeString(s string) (*UnicodeString, error) { + buf, err := windows.UTF16FromString(s) + if err != nil { + return nil, err + } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + + uni := &UnicodeString{ + // The length is in bytes and should not include the trailing null character. + Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), + Buffer: &buf[0], + } + return uni, nil +} + +// ConvertStringSetToSlice is a helper function used to convert the contents of +// `buf` into a string slice. `buf` contains a set of null terminated strings +// with an additional null at the end to indicate the end of the set. +func ConvertStringSetToSlice(buf []byte) ([]string, error) { + var results []string + prev := 0 + for i := range buf { + if buf[i] == 0 { + if prev == i { + // found two null characters in a row, return result + return results, nil + } + results = append(results, string(buf[prev:i])) + prev = i + 1 + } + } + return nil, errors.New("string set malformed: missing null terminator at end of buffer") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go new file mode 100644 index 00000000000..1d4ba3c4f8e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -0,0 +1,5 @@ +// Package winapi contains various low-level bindings to Windows APIs. It can +// be thought of as an extension to golang.org/x/sys/windows. 
+package winapi + +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go new file mode 100644 index 00000000000..4eb64b4c0c4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -0,0 +1,360 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package winapi + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") + procRtlNtStatusToDosError = 
modntdll.NewProc("RtlNtStatusToDosError") +) + +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) + } + return +} + +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryInformationJobObject(jobHandle windows.Handle, infoClass 
uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func LocalFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = uint32(r0) + return +} + +func CMGetDeviceIDListSize(pulLen *uint32, pszFilter 
*byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return + } + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) +} + +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + status = uint32(r0) + return +} + +func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { + var _p0 uint32 + if singleEntry { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 
7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func RtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go new file mode 100644 index 00000000000..8916163706c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -0,0 +1,107 @@ +package hcsshim + +import ( + "context" + "crypto/sha1" + "path/filepath" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/wclayer" +) + +func layerPath(info *DriverInfo, id string) string { + return filepath.Join(info.HomeDir, id) +} + +func ActivateLayer(info DriverInfo, id string) error { + return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) +} +func CreateLayer(info DriverInfo, id, parent string) error { + return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) +} + +// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. +func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func DeactivateLayer(info DriverInfo, id string) error { + return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) +} +func DestroyLayer(info DriverInfo, id string) error { + return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) +} + +// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
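+// (As the bodies below show, the legacy Sandbox name and ExpandScratchSize
+// both forward to wclayer.ExpandScratchSize with identical arguments; only
+// the exported name differs.)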
+func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) +} +func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) +} +func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { + return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) +} +func GetLayerMountPath(info DriverInfo, id string) (string, error) { + return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) +} +func GetSharedBaseImages() (imageData string, err error) { + return wclayer.GetSharedBaseImages(context.Background()) +} +func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { + return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) +} +func LayerExists(info DriverInfo, id string) (bool, error) { + return wclayer.LayerExists(context.Background(), layerPath(&info, id)) +} +func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { + return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func ProcessBaseLayer(path string) error { + return wclayer.ProcessBaseLayer(context.Background(), path) +} +func ProcessUtilityVMImage(path string) error { + return wclayer.ProcessUtilityVMImage(context.Background(), path) +} +func UnprepareLayer(info DriverInfo, layerId string) error { + return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) +} + +type DriverInfo struct { + Flavour int + HomeDir string +} + +type GUID [16]byte + +func NameToGuid(name string) (id GUID, err error) { + g, err := wclayer.NameToGuid(context.Background(), name) + return g.ToWindowsArray(), err +} + +func NewGUID(source string) *GUID { + h := sha1.Sum([]byte(source)) + var g GUID + copy(g[0:], h[0:16]) + return &g +} + +func (g *GUID) ToString() string { + return guid.FromWindowsArray(*g).String() +} + +type LayerReader = wclayer.LayerReader + +func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { + return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) +} + +type LayerWriter = wclayer.LayerWriter + +func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { + return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) +} + +type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 00000000000..3ab3bcd89a1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,50 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. 
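+// Get decodes the packed DWORD returned by windows.GetVersion: the major
+// version sits in the low byte, the minor version in the next byte, and the
+// build number in the high word, so (illustratively) 0x4A64000A decodes to
+// version 10.0, build 19044.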
+// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + var err error + osv = OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. +func Build() uint16 { + return Get().Build +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 00000000000..75dce5d821d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,50 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + + // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + + // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + + // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). + V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 +) diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go new file mode 100644 index 00000000000..3362c683357 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/process.go @@ -0,0 +1,98 @@ +package hcsshim + +import ( + "context" + "io" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" +) + +// ContainerError is an error encountered in HCS +type process struct { + p *hcs.Process + waitOnce sync.Once + waitCh chan struct{} + waitErr error +} + +// Pid returns the process ID of the process within the container. +func (process *process) Pid() int { + return process.p.Pid() +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. 
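+//
+// Kill alone does not block until the process is gone; a caller that must
+// observe termination can pair it with Wait (a sketch, assuming p is a
+// process obtained from this package):
+//
+//	if err := p.Kill(); err == nil {
+//		err = p.Wait()
+//	}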
+func (process *process) Kill() error { + found, err := process.p.Kill(context.Background()) + if err != nil { + return convertProcessError(err, process) + } + if !found { + return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} + } + return nil +} + +// Wait waits for the process to exit. +func (process *process) Wait() error { + return convertProcessError(process.p.Wait(), process) +} + +// WaitTimeout waits for the process to exit or the duration to elapse. It returns +// false if timeout occurs. +func (process *process) WaitTimeout(timeout time.Duration) error { + process.waitOnce.Do(func() { + process.waitCh = make(chan struct{}) + go func() { + process.waitErr = process.Wait() + close(process.waitCh) + }() + }) + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-t.C: + return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} + case <-process.waitCh: + return process.waitErr + } +} + +// ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *process) ExitCode() (int, error) { + code, err := process.p.ExitCode() + if err != nil { + err = convertProcessError(err, process) + } + return code, err +} + +// ResizeConsole resizes the console of the process. +func (process *process) ResizeConsole(width, height uint16) error { + return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes; it should be possible to +// call this multiple times to get multiple interfaces. +func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { + stdin, stdout, stderr, err := process.p.StdioLegacy() + if err != nil { + err = convertProcessError(err, process) + } + return stdin, stdout, stderr, err +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. +func (process *process) CloseStdin() error { + return convertProcessError(process.p.CloseStdin(context.Background()), process) +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. +func (process *process) Close() error { + return convertProcessError(process.p.Close(), process) +} diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go new file mode 100644 index 00000000000..8bed8485738 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -0,0 +1,54 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hcsshim + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0d2eb45b8e1..0952d8c7392 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -74,6 +74,15 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) } + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + // And for DefaultMode. selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} if config.AEAD().Mode() != packet.AEADModeEAX { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index c3fedf7a250..cef3613b611 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -37,15 +37,17 @@ type Identity struct { Name string // by convention, has the form "Full Name (comment) " UserId *packet.UserId SelfSignature *packet.Signature - Signatures []*packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures } // A Subkey is an additional public key in an Entity. Subkeys can be used for // encryption. type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature } // A Key identifies a specific public key in an Entity. This is either the @@ -55,6 +57,7 @@ type Key struct { PublicKey *packet.PublicKey PrivateKey *packet.PrivateKey SelfSignature *packet.Signature + Revocations []*packet.Signature } // A KeyRing provides access to public and private keys. @@ -71,28 +74,53 @@ type KeyRing interface { DecryptionKeys() []Key } -// PrimaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order. 
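+// For example, given one revoked identity and one unrevoked identity, the
+// unrevoked one is returned even if the revoked one carries the primary-ID
+// flag; among otherwise equal identities, the one with the newest
+// self-signature wins (see shouldPreferIdentity below).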
func (e *Entity) PrimaryIdentity() *Identity { - var firstIdentity *Identity + var primaryIdentity *Identity for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident + if shouldPreferIdentity(primaryIdentity, ident) { + primaryIdentity = ident } } - return firstIdentity + return primaryIdentity +} + +func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { + if (existingId == nil) { + return true + } + + if (len(existingId.Revocations) > len(potentialNewId.Revocations)) { + return true + } + + if (len(existingId.Revocations) < len(potentialNewId.Revocations)) { + return false + } + + if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { + return false + } + + if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + return true + } + + return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) } // EncryptionKey returns the best candidate Key for encrypting a message to the // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { - // Fail to find any encryption key if the primary key has expired. + // Fail to find any encryption key if the... i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -104,6 +132,8 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { subkey.Sig.FlagEncryptCommunications && subkey.PublicKey.PubKeyAlgo.CanEncrypt() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { candidateSubkey = i maxTime = subkey.Sig.CreationTime @@ -112,17 +142,16 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we don't have any candidate subkeys for encryption and // the primary key doesn't have any usage metadata then we // assume that the primary key is ok. Or, if the primary key is // marked as ok to encrypt with, then we can obviously use it. - // Also, check expiry again just to be safe. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } return Key{}, false @@ -137,10 +166,12 @@ func (e *Entity) SigningKey(now time.Time) (Key, bool) { // SigningKeyById return the Key for signing a message with this // Entity and keyID. 
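// An id of 0 matches any key, so SigningKey above amounts to
// SigningKeyById(now, 0).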
func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { - // Fail to find any signing key if the primary key has expired. + // Fail to find any signing key if the... i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -152,8 +183,10 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { subkey.Sig.FlagSign && subkey.PublicKey.PubKeyAlgo.CanSign() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && - (id == 0 || subkey.PrivateKey.KeyId == id) { + (id == 0 || subkey.PublicKey.KeyId == id) { candidateSubkey = idx maxTime = subkey.Sig.CreationTime } @@ -161,22 +194,63 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we have no candidate subkey then we assume that it's ok to sign // with the primary key. Or, if the primary key is marked as ok to - // sign with, then we can use it. Also, check expiry again just to be safe. + // sign with, then we can use it. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired && - (id == 0 || e.PrivateKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanSign() && + (id == 0 || e.PrimaryKey.KeyId == id) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } // No keys with a valid Signing Flag or no keys matched the id passed in return Key{}, false } +func revoked(revocations []*packet.Signature, now time.Time) bool { + for _, revocation := range revocations { + if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { + // If the key is compromised, the key is considered revoked even before the revocation date. + return true + } + if !revocation.SigExpired(now) { + return true + } + } + return false +} + +// Revoked returns whether the entity has any direct key revocation signatures. +// Note that third-party revocation signatures are not supported. +// Note also that Identity and Subkey revocation should be checked separately. +func (e *Entity) Revoked(now time.Time) bool { + return revoked(e.Revocations, now) +} + +// Revoked returns whether the identity has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (i *Identity) Revoked(now time.Time) bool { + return revoked(i.Revocations, now) +} + +// Revoked returns whether the subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (s *Subkey) Revoked(now time.Time) bool { + return revoked(s.Revocations, now) +} + +// Revoked returns whether the key or subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +// Note also that Identity revocation should be checked separately. 
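+// A revocation with reason packet.KeyCompromised counts immediately and
+// unconditionally, even before the revocation signature's own expiry, as
+// revoked above encodes.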
+// Normally, it's not necessary to call this function, except on keys returned by +// KeysById or KeysByIdUsage. +func (key *Key) Revoked(now time.Time) bool { + return revoked(key.Revocations, now) +} + // An EntityList contains one or more Entities. type EntityList []*Entity @@ -184,21 +258,14 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + ident := e.PrimaryIdentity() + selfSig := ident.SelfSignature + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } for _, subKey := range e.Subkeys { if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -210,14 +277,6 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { // the bitwise-OR of packet.KeyFlag* values. func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - if key.SelfSignature.FlagsValid && requiredUsage != 0 { var usage byte if key.SelfSignature.FlagCertify { @@ -247,7 +306,7 @@ func (el EntityList) DecryptionKeys() (keys []Key) { for _, e := range el { for _, subKey := range e.Subkeys { if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -442,11 +501,22 @@ func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { break } - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { return errors.StructuralError("user ID self-signature invalid: " + err.Error()) } - if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { identity.SelfSignature = sig } identity.Signatures = append(identity.Signatures, sig) @@ -488,9 +558,9 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p switch sig.SigType { case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig + subKey.Revocations = append(subKey.Revocations, sig) case packet.SigTypeSubkeyBinding: - if 
shouldReplaceSubkeySig(subKey.Sig, sig) { + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { subKey.Sig = sig } } @@ -505,22 +575,6 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p return nil } -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - // SerializePrivate serializes an Entity, including private key material, but // excluding signatures from other entities, to the given Writer. // Identities and subkeys are re-signed in case they changed since NewEntry. @@ -549,6 +603,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo if err != nil { return } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -560,6 +620,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return } } + for _, revocation := range ident.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = ident.SelfSignature.Serialize(w) if err != nil { return @@ -583,6 +649,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo } } } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return @@ -598,6 +670,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -615,6 +693,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return err @@ -659,14 +743,13 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co // specified reason code and text (RFC4880 section-5.2.3.23). // If config is nil, sensible defaults will be used. 
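+//
+// Illustrative usage (e is an *Entity; the stored revocation is written out
+// by a later call to e.Serialize):
+//
+//	if err := e.RevokeKey(packet.KeyCompromised, "private key leaked", nil); err != nil {
+//		// handle the signing error
+//	}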
func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeKeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } @@ -686,22 +769,21 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea return errors.InvalidArgumentError("given subkey is not associated with this key") } - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeSubkeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } - if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { return err } - sk.Sig = revSig + sk.Revocations = append(sk.Revocations, revSig) return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go index 21d17c2f8be..a2219e64d1c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -138,6 +138,69 @@ heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB =IKnw -----END PGP PUBLIC KEY BLOCK-----` +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW +IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb 
+YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR @@ -304,33 +367,34 @@ X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE -FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL -CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl -1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo -aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy -OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 -/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA -g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla -d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP -2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l -nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX -nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX -SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 -rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA -0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ -wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa -LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV -8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz -j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy -AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 -1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ -QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp -f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn 
-3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK -2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA -ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi -f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj -5KjhX2PVNEJd3XZRzaXZE2aAMQ== -=522n +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT -----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index fe0da9d3685..65203813778 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -395,6 +395,7 @@ const ( SigTypeDirectSignature = 0x1F SigTypeKeyRevocation = 0x20 SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 28971c1ada2..297dc40ba8c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -735,13 +735,13 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { } // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, -// made by the passed in signingKey. 
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) +// made by this public key, of signed. +func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) if err != nil { return err } - return signingKey.VerifySignature(h, sig) + return pk.VerifySignature(h, sig) } // userIdSignatureHash returns a Hash of the message that needs to be signed diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 6b1704e4292..9131b876e53 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -68,6 +68,11 @@ type Signature struct { IssuerFingerprint []byte IsPrimaryId *bool + // PolicyURI can be set to the URI of a document that describes the + // policy under which the signature was issued. See RFC 4880, section + // 5.2.3.20 for details. + PolicyURI string + // FlagsValid is set if any flags were given. See RFC 4880, section // 5.2.3.21 for details. FlagsValid bool @@ -75,7 +80,7 @@ type Signature struct { // RevocationReason is set if this signature has been revoked. // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 + RevocationReason *ReasonForRevocation RevocationReasonText string // In a self-signature, these flags are set there is a features subpacket @@ -218,6 +223,7 @@ const ( prefHashAlgosSubpacket signatureSubpacketType = 21 prefCompressionSubpacket signatureSubpacketType = 22 primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 keyFlagsSubpacket signatureSubpacketType = 27 reasonForRevocationSubpacket signatureSubpacketType = 29 featuresSubpacket signatureSubpacketType = 30 @@ -377,8 +383,8 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r err = errors.StructuralError("empty revocation reason subpacket") return } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = ReasonForRevocation(subpacket[0]) sig.RevocationReasonText = string(subpacket[1:]) case featuresSubpacket: // Features subpacket, section 5.2.3.24 specifies a very general @@ -416,6 +422,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) } + case policyUriSubpacket: + // Policy URI, section 5.2.3.20 + if !isHashed { + return + } + sig.PolicyURI = string(subpacket) case issuerFingerprintSubpacket: v, l := subpacket[0], len(subpacket[1:]) if v == 5 && l != 32 || v != 5 && l != 20 { @@ -507,6 +519,9 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { if subpacket.hashed == hashed { n := serializeSubpacketLength(to, len(subpacket.contents)+1) to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } to = to[1+n:] n = copy(to, subpacket.contents) to = to[n:] @@ -714,6 +729,14 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config return sig.Sign(h, priv, config) } +// RevokeSubkey computes a subkey revocation signature of pub using priv. 
+// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + // Serialize marshals sig to w. Sign, SignUserId or SignKey must have been // called first. func (sig *Signature) Serialize(w io.Writer) (err error) { @@ -892,6 +915,10 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) } + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + if len(sig.PreferredAEAD) > 0 { subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) } @@ -899,7 +926,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. if sig.RevocationReason != nil { subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) } // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index a649ebbab39..acb7cc6e29f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -370,11 +370,25 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // If signature KeyID matches if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { - scr.md.Signature = sig - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { - scr.md.SignatureError = errors.ErrSignatureExpired + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + now := scr.config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + signatureError = errors.ErrKeyRevoked + } + if sig.SigExpired(now) { + signatureError = errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { + signatureError = errors.ErrKeyExpired + } } + scr.md.Signature = sig + scr.md.SignatureError = signatureError } else { scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) } @@ -483,10 +497,16 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, err = key.PublicKey.VerifySignature(h, sig) if err == nil { now := config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + return key.Entity, errors.ErrKeyRevoked + } if sig.SigExpired(now) { return key.Entity, errors.ErrSignatureExpired } - if key.PublicKey.KeyExpired(key.SelfSignature, now) { 
+ if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { return key.Entity, errors.ErrKeyExpired } return key.Entity, nil diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index bb74b13872a..35e18f6112e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -208,6 +208,15 @@ func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signe return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) } +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + // writeAndSign writes the data as a payload package and, optionally, signs // it. hints contains optional information, that is also encrypted, // that aids the recipients in processing the message. The resulting @@ -358,7 +367,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En var ok bool encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } sig := to[i].PrimaryIdentity().SelfSignature diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore new file mode 100644 index 00000000000..c66769f6ca1 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +.*.sw? 
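The read.go and write.go changes above make verification key-state aware: a cryptographically valid signature can now come back with errors.ErrKeyRevoked, errors.ErrKeyExpired, or errors.ErrSignatureExpired instead of nil. A minimal sketch of how a caller might branch on those sentinels — assuming the ReadArmoredKeyRing and CheckArmoredDetachedSignature helpers exported by this version of the package (hedged; only the error values are taken from this patch):

```go
package main

import (
	"fmt"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)

// verifyDetached checks sig against data using the given keyring and
// reports why an otherwise valid signature should not be trusted.
func verifyDetached(keyringPath, dataPath, sigPath string) error {
	kr, err := os.Open(keyringPath)
	if err != nil {
		return err
	}
	defer kr.Close()

	keyring, err := openpgp.ReadArmoredKeyRing(kr)
	if err != nil {
		return err
	}

	data, err := os.Open(dataPath)
	if err != nil {
		return err
	}
	defer data.Close()

	sig, err := os.Open(sigPath)
	if err != nil {
		return err
	}
	defer sig.Close()

	// With this patch, a signature that verifies mathematically can
	// still be rejected because of key or signature state.
	_, err = openpgp.CheckArmoredDetachedSignature(keyring, data, sig, nil)
	switch err {
	case pgperrors.ErrKeyRevoked:
		return fmt.Errorf("signing key, its primary key, or its primary identity is revoked")
	case pgperrors.ErrKeyExpired:
		return fmt.Errorf("signing key or its self-signature has expired")
	case pgperrors.ErrSignatureExpired:
		return fmt.Errorf("the signature itself has expired")
	}
	return err
}
```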
+/coverage.txt \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/.whitesource b/vendor/github.com/VividCortex/ewma/.whitesource new file mode 100644 index 00000000000..d7eebc0cf27 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.whitesource @@ -0,0 +1,3 @@ +{ + "settingsInheritedFrom": "VividCortex/whitesource-config@master" +} \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 00000000000..a78d643ed8f --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 00000000000..87b4a3c7efa --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,145 @@ +# EWMA + +[![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) +![build](https://github.com/VividCortex/ewma/workflows/build/badge.svg) +[![codecov](https://codecov.io/gh/VividCortex/ewma/branch/master/graph/badge.svg)](https://codecov.io/gh/VividCortex/ewma) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). + +### Exponentially Weighted Moving Average + +An exponentially weighted moving average is a way to continuously compute a type of +average for a series of numbers, as the numbers arrive. After a value in the series is +added to the average, its weight in the average decreases exponentially over time. This +biases the average towards more recent data. EWMAs are useful for several reasons, chiefly +their inexpensive computational and memory cost, as well as the fact that they represent +the recent central tendency of the series of values. + +The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average +is biased towards recent history. The alpha must be between 0 and 1, and is typically +a fairly small number, such as 0.04. We will discuss the choice of alpha later. + +The algorithm works thus, in pseudocode: + +1. Multiply the next number in the series by alpha. +2. Multiply the current value of the average by 1 minus alpha. +3. 
Add the result of steps 1 and 2, and store it as the new current value of the average. +4. Repeat for each number in the series. + +There are special-case behaviors for how to initialize the current value, and these vary +between implementations. One approach is to start with the first value in the series; +another is to average the first 10 or so values in the series using an arithmetic average, +and then begin the incremental updating of the average. Each method has pros and cons. + +It may help to look at it pictorially. Suppose the series has five numbers, and we choose +alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. + +![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) + +Now let's take the moving average of those numbers. First we set the average to the value +of the first number. + +![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) + +Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add +them to generate a new value. + +![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) + +This continues until we are done. + +![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) + +Notice how each of the values in the series decays by half each time a new value +is added, and the top of the bars in the lower portion of the image represents the +size of the moving average. It is a smoothed, or low-pass, average of the original +series. + +For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. + +### Choosing Alpha + +Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) +that averages over the previous N samples. What is the average age of each sample? It is N/2. + +Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula +to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book +"Production and Operations Analysis" by Steven Nahmias. + +So, for example, if you have a time-series with samples once per second, and you want to get the +moving average over the previous minute, you should use an alpha of .032786885. This, by the way, +is the constant alpha used for this repository's SimpleEWMA. + +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. + +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. 
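SimpleEWMA's constant decay is exactly the recurrence from the pseudocode above with a fixed alpha. A self-contained sketch of that update rule and of the alpha = 2/(N+1) window-matching formula (illustrative names, not part of this package):

```go
package main

import "fmt"

// ewmaUpdate applies one step of the exponentially weighted moving
// average: the new sample is weighted by alpha, the old average by 1-alpha.
func ewmaUpdate(avg, sample, alpha float64) float64 {
	return sample*alpha + avg*(1-alpha)
}

// alphaForWindow returns the decay factor whose average sample age matches
// a fixed sliding window over n samples: alpha = 2/(n+1).
func alphaForWindow(n int) float64 {
	return 2 / (float64(n) + 1)
}

func main() {
	alpha := alphaForWindow(60) // one sample per second, one-minute window
	avg := 300.0                // initialize with the first value in the series
	for _, x := range []float64{305, 295, 310, 290} {
		avg = ewmaUpdate(avg, x, alpha)
	}
	fmt.Printf("alpha=%.9f avg=%.3f\n", alpha, avg)
}
```

alphaForWindow(60) reproduces the 0.032786885 constant quoted above for a one-minute window of once-per-second samples.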
It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. + +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main + +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. Pull requests for new features will be rejected, +so we recommend forking the repository and making changes in your fork for your use case. + +## License + +This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. +It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. diff --git a/vendor/github.com/VividCortex/ewma/codecov.yml b/vendor/github.com/VividCortex/ewma/codecov.yml new file mode 100644 index 00000000000..0d36d903f93 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + threshold: 15% + patch: off diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go new file mode 100644 index 00000000000..44d5d53e3ca --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/ewma.go @@ -0,0 +1,126 @@ +// Package ewma implements exponentially weighted moving averages. +package ewma + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +const ( + // By default, we average over a one-minute period, which means the average + // age of the metrics in the period is 30 seconds. + AVG_METRIC_AGE float64 = 30.0 + + // The formula for computing the decay factor from the average age comes + // from "Production and Operations Analysis" by Steven Nahmias. + DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1) + + // For best results, the moving average should not be initialized to the + // samples it sees immediately. The book "Production and Operations + // Analysis" by Steven Nahmias suggests initializing the moving average to + // the mean of the first 10 samples. Until the VariableEwma has seen this + // many samples, it is not "ready" to be queried for the value of the + // moving average. This adds some memory cost. + WARMUP_SAMPLES uint8 = 10 +) + +// MovingAverage is the interface that computes a moving average over a time- +// series stream of numbers. 
The average may be over a window or exponentially +// decaying. +type MovingAverage interface { + Add(float64) + Value() float64 + Set(float64) +} + +// NewMovingAverage constructs a MovingAverage that computes an average with the +// desired characteristics in the moving window or exponential decay. If no +// age is given, it constructs a default exponentially weighted implementation +// that consumes minimal memory. The age is related to the decay factor alpha +// by the formula given for the DECAY constant. It signifies the average age +// of the samples as time goes to infinity. +func NewMovingAverage(age ...float64) MovingAverage { + if len(age) == 0 || age[0] == AVG_METRIC_AGE { + return new(SimpleEWMA) + } + return &VariableEWMA{ + decay: 2 / (age[0] + 1), + } +} + +// A SimpleEWMA represents the exponentially weighted moving average of a +// series of numbers. It WILL have different behavior than the VariableEWMA +// for multiple reasons. It has no warm-up period and it uses a constant +// decay. These properties let it use less memory. It will also behave +// differently when it's equal to zero, which is assumed to mean +// uninitialized, so if a value is likely to actually become zero over time, +// then any non-zero value will cause a sharp jump instead of a small change. +// However, note that this takes a long time, and the value may just +// decays to a stable value that's close to zero, but which won't be mistaken +// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example. +type SimpleEWMA struct { + // The current value of the average. After adding with Add(), this is + // updated to reflect the average of all values seen thus far. + value float64 +} + +// Add adds a value to the series and updates the moving average. +func (e *SimpleEWMA) Add(value float64) { + if e.value == 0 { // this is a proxy for "uninitialized" + e.value = value + } else { + e.value = (value * DECAY) + (e.value * (1 - DECAY)) + } +} + +// Value returns the current value of the moving average. +func (e *SimpleEWMA) Value() float64 { + return e.value +} + +// Set sets the EWMA's value. +func (e *SimpleEWMA) Set(value float64) { + e.value = value +} + +// VariableEWMA represents the exponentially weighted moving average of a series of +// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. +type VariableEWMA struct { + // The multiplier factor by which the previous samples decay. + decay float64 + // The current value of the average. + value float64 + // The number of samples added to this instance. + count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. 
+func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/vendor/github.com/VividCortex/ewma/go.mod b/vendor/github.com/VividCortex/ewma/go.mod new file mode 100644 index 00000000000..e481e3dfb7a --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/go.mod @@ -0,0 +1,3 @@ +module github.com/VividCortex/ewma + +go 1.12 diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE b/vendor/github.com/acarl005/stripansi/LICENSE similarity index 96% rename from vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE rename to vendor/github.com/acarl005/stripansi/LICENSE index c35c17af980..00abe0dbf49 100644 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE +++ b/vendor/github.com/acarl005/stripansi/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2015 Dmitri Shuralyov +Copyright (c) 2018 Andrew Carlson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/acarl005/stripansi/README.md b/vendor/github.com/acarl005/stripansi/README.md new file mode 100644 index 00000000000..8bdb1f5059f --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/README.md @@ -0,0 +1,30 @@ +Strip ANSI +========== + +This Go package removes ANSI escape codes from strings. + +Ideally, we would prevent these from appearing in any text we want to process. +However, sometimes this can't be helped, and we need to be able to deal with that noise. +This will use a regexp to remove those unwanted escape codes. + + +## Install + +```sh +$ go get -u github.com/acarl005/stripansi +``` + +## Usage + +```go +import ( + "fmt" + "github.com/acarl005/stripansi" +) + +func main() { + msg := "\x1b[38;5;140m foo\x1b[0m bar" + cleanMsg := stripansi.Strip(msg) + fmt.Println(cleanMsg) // " foo bar" +} +``` diff --git a/vendor/github.com/acarl005/stripansi/stripansi.go b/vendor/github.com/acarl005/stripansi/stripansi.go new file mode 100644 index 00000000000..235732a7829 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/stripansi.go @@ -0,0 +1,13 @@ +package stripansi + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +func Strip(str string) string { + return re.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 00000000000..102fb9a691b --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) 
+env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 00000000000..5ba5c86fcb0 --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 00000000000..08b2e4a3d76 --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. + +Usage +----- +```bash +$ go get github.com/blang/semver +``` +Note: Always vendor your dependencies or fix on a specific version tag. + +```go +import github.com/blang/semver +v1, err := semver.Make("1.0.0-beta") +v2, err := semver.Make("2.0.0-beta") +v1.Compare(v2) +``` + +Also check the [GoDocs](http://godoc.org/github.com/blang/semver). + +Why should I use this lib? +----- + +- Fully spec compatible +- No reflection +- No regex +- Fully tested (Coverage >99%) +- Readable parsing/validation errors +- Fast (See [Benchmarks](#benchmarks)) +- Only Stdlib +- Uses values instead of pointers +- Many features, see below + + +Features +----- + +- Parsing and validation at all levels +- Comparator-like comparisons +- Compare Helper Methods +- InPlace manipulation +- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` +- Wildcards `>=1.x`, `<=2.5.x` +- Sortable (implements sort.Interface) +- database/sql compatible (sql.Scanner/Valuer) +- encoding/json compatible (json.Marshaler/Unmarshaler) + +Ranges +------ + +A `Range` is a set of conditions which specify which versions satisfy the range. + +A condition is composed of an operator and a version. 
The supported operators are: + +- `<1.0.0` Less than `1.0.0` +- `<=1.0.0` Less than or equal to `1.0.0` +- `>1.0.0` Greater than `1.0.0` +- `>=1.0.0` Greater than or equal to `1.0.0` +- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` +- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. + +Note that spaces between the operator and the version will be gracefully tolerated. + +A `Range` can link multiple `Ranges` separated by space: + +Ranges can be linked by logical AND: + + - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` + - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` + +Ranges can also be linked by logical OR: + + - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` + +AND has a higher precedence than OR. It's not possible to use brackets. + +Ranges can be combined by both AND and OR + + - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` + +Range usage: + +``` +v, err := semver.Parse("1.2.3") +range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") +if range(v) { + //valid +} + +``` + +Example +----- + +Have a look at full examples in [examples/main.go](examples/main.go) + +```go +import github.com/blang/semver + +v, err := semver.Make("0.0.1-alpha.preview+123.github") +fmt.Printf("Major: %d\n", v.Major) +fmt.Printf("Minor: %d\n", v.Minor) +fmt.Printf("Patch: %d\n", v.Patch) +fmt.Printf("Pre: %s\n", v.Pre) +fmt.Printf("Build: %s\n", v.Build) + +// Prerelease versions array +if len(v.Pre) > 0 { + fmt.Println("Prerelease versions:") + for i, pre := range v.Pre { + fmt.Printf("%d: %q\n", i, pre) + } +} + +// Build meta data array +if len(v.Build) > 0 { + fmt.Println("Build meta data:") + for i, build := range v.Build { + fmt.Printf("%d: %q\n", i, build) + } +} + +v001, err := semver.Make("0.0.1") +// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE +v001.GT(v) == true +v.LT(v001) == true +v.GTE(v) == true +v.LTE(v) == true + +// Or use v.Compare(v2) for comparisons (-1, 0, 1): +v001.Compare(v) == 1 +v.Compare(v001) == -1 +v.Compare(v) == 0 + +// Manipulate Version in place: +v.Pre[0], err = semver.NewPRVersion("beta") +if err != nil { + fmt.Printf("Error parsing pre release version: %q", err) +} + +fmt.Println("\nValidate versions:") +v.Build[0] = "?" 
+ +err = v.Validate() +if err != nil { + fmt.Printf("Validation failed: %s\n", err) +} +``` + + +Benchmarks +----- + + BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op + BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op + BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op + BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op + BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op + BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op + BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op + BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op + BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op + BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op + BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op + BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op + BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op + BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op + BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op + BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op + +See benchmark cases at [semver_test.go](semver_test.go) + + +Motivation +----- + +I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. + + +Contribution +----- + +Feel free to make a pull request. For bigger changes create a issue first to discuss about it. + + +License +----- + +See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go new file mode 100644 index 00000000000..a74bf7c4494 --- /dev/null +++ b/vendor/github.com/blang/semver/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
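One nit on the README's Range usage snippet above: it names its variable `range`, which is a reserved word in Go, so the snippet does not compile as written. A compilable variant using only the API documented in this package:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.2.3")

	// ParseRange returns a Range, i.e. a func(Version) bool.
	inRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}

	if inRange(v) {
		fmt.Println("1.2.3 satisfies >1.0.0 <2.0.0 || >=3.0.0")
	}
}
```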
+func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 00000000000..1cf8ebdd9c1 --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 00000000000..fca406d4793 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. 
+// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMajorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Index(ap, "x") != -1 { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go new file mode 100644 index 00000000000..8ee0842e6ac --- /dev/null +++ b/vendor/github.com/blang/semver/semver.go @@ -0,0 +1,418 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precendence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
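The wildcard expansion rules in the comment above mean x-ranges can be handed straight to ParseRange or MustParseRange. A quick check of the documented rewrite for a patch wildcard, with the expected output derived from those rules ("1.2.x" becomes ">=1.2.0 <1.3.0"):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Expanded internally to ">=1.2.0 <1.3.0" per the rules above.
	r := semver.MustParseRange("1.2.x")

	for _, s := range []string{"1.2.0", "1.2.9", "1.3.0"} {
		fmt.Printf("%s in 1.2.x: %v\n", s, r(semver.MustParse(s)))
	}
	// Output:
	// 1.2.0 in 1.2.x: true
	// 1.2.9 in 1.2.x: true
	// 1.3.0 in 1.2.x: false
}
```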
+ } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all pr versions are the equal but one has further prversion, this one greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// Validate validates v and returns error in case +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (vp *Version, err error) { + v, err := Parse(s) + vp = &v + return +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// 
specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions +// with only major and minor components specified +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + s = strings.Join(parts, ".") + } + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. 
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 00000000000..e18f880826a --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
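ParseTolerant above normalizes loose inputs (leading "v", stray spaces, a missing patch component) before delegating to Parse; combined with the sort.go helpers that follow, a short sketch of the two together:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ParseTolerant accepts "v1.2" and pads it to "1.2.0".
	vs := make([]semver.Version, 0, 3)
	for _, s := range []string{"v1.2", " 2.0.0-beta ", "1.10.3"} {
		v, err := semver.ParseTolerant(s)
		if err != nil {
			panic(err)
		}
		vs = append(vs, v)
	}

	// Sort orders by semver precedence, not lexically:
	// 1.2.0 < 1.10.3 < 2.0.0-beta.
	semver.Sort(vs)
	fmt.Println(vs) // [1.2.0 1.10.3 2.0.0-beta]
}
```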
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 00000000000..eb4d802666e --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea88da7..00000000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 2fd8693c21b..792b4a60b34 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -1,7 +1,7 @@ # xxhash -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) xxhash is a Go implementation of the 64-bit [xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. 
This is a @@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes' - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index db0b35fbe39..15c835d5417 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index d580e32aed4..be8db5bf796 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -6,7 +6,7 @@ // Register allocation: // AX h -// CX pointer to advance through b +// SI pointer to advance through b // DX n // BX loop end // R8 v1, k1 @@ -16,39 +16,39 @@ // R12 tmp // R13 prime1v // R14 prime2v -// R15 prime4v +// DI prime4v -// round reads from and advances the buffer pointer in CX. +// round reads from and advances the buffer pointer in SI. // It assumes that R13 has prime1v and R14 has prime2v. #define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ IMULQ R14, R12 \ ADDQ R12, r \ ROLQ $31, r \ IMULQ R13, r // mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define mergeRound(acc, val) \ IMULQ R14, val \ ROLQ $31, val \ IMULQ R13, val \ XORQ val, acc \ IMULQ R13, acc \ - ADDQ R15, acc + ADDQ DI, acc // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT, $0-32 // Load fixed primes. MOVQ ·prime1v(SB), R13 MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·prime4v(SB), DI // Load slice. - MOVQ b_base+0(FP), CX + MOVQ b_base+0(FP), SI MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX // The first loop limit will be len(b)-32. SUBQ $32, BX @@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32 XORQ R11, R11 SUBQ R13, R11 - // Loop until CX > BX. + // Loop until SI > BX. blockLoop: round(R8) round(R9) round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop MOVQ R8, AX @@ -100,16 +100,16 @@ noBlocks: afterBlocks: ADDQ DX, AX - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, BX - CMPQ CX, BX + CMPQ SI, BX JG fourByte wordLoop: // Calculate k1. 
- MOVQ (CX), R8 - ADDQ $8, CX + MOVQ (SI), R8 + ADDQ $8, SI IMULQ R14, R8 ROLQ $31, R8 IMULQ R13, R8 @@ -117,18 +117,18 @@ wordLoop: XORQ R8, AX ROLQ $27, AX IMULQ R13, AX - ADDQ R15, AX + ADDQ DI, AX - CMPQ CX, BX + CMPQ SI, BX JLE wordLoop fourByte: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JG singles - MOVL (CX), R8 - ADDQ $4, CX + MOVL (SI), R8 + ADDQ $4, SI IMULQ R13, R8 XORQ R8, AX @@ -138,19 +138,19 @@ fourByte: singles: ADDQ $4, BX - CMPQ CX, BX + CMPQ SI, BX JGE finalize singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX + MOVBQZX (SI), R12 + ADDQ $1, SI IMULQ ·prime5v(SB), R12 XORQ R12, AX ROLQ $11, AX IMULQ R13, AX - CMPQ CX, BX + CMPQ SI, BX JL singlesLoop finalize: @@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40 MOVQ ·prime2v(SB), R14 // Load slice. - MOVQ b_base+8(FP), CX + MOVQ b_base+8(FP), SI MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX + LEAQ (SI)(DX*1), BX SUBQ $32, BX // Load vN from d. @@ -199,7 +199,7 @@ blockLoop: round(R10) round(R11) - CMPQ CX, BX + CMPQ SI, BX JLE blockLoop // Copy vN back to d. @@ -208,8 +208,8 @@ blockLoop: MOVQ R10, 16(AX) MOVQ R11, 24(AX) - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 53bf76efbc2..376e0ca2e49 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -6,41 +6,52 @@ package xxhash import ( - "reflect" "unsafe" ) -// Notes: +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: // -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) // -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. There is also a test (TestInlining) which verifies that these are +// inlined. // -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. +// See https://github.com/golang/go/issues/42739 for discussion. // Sum64String computes the 64-bit xxHash digest of s. // It may be faster than Sum64([]byte(s)) by avoiding a copy. 
 func Sum64String(s string) uint64 {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
 	return Sum64(b)
 }
 
 // WriteString adds more data to d. It always returns len(s), nil.
 // It may be faster than Write([]byte(s)) by avoiding a copy.
 func (d *Digest) WriteString(s string) (n int, err error) {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return d.Write(b)
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
 }
diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore
new file mode 100644
index 00000000000..a3062beae38
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.gitignore
@@ -0,0 +1 @@
+.vscode/*
diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml
new file mode 100644
index 00000000000..9c359554320
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+  - 1.x
+script:
+  - GOOS=windows go install github.com/chzyer/readline/example/...
+  - GOOS=linux go install github.com/chzyer/readline/example/...
+  - GOOS=darwin go install github.com/chzyer/readline/example/...
+  - go test -race -v
diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md
new file mode 100644
index 00000000000..14ff5be1313
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/CHANGELOG.md
@@ -0,0 +1,58 @@
+# ChangeLog
+
+### 1.4 - 2016-07-25
+
+* [#60][60] Support dynamic autocompletion
+* Fix ANSI parser on Windows
+* Fix wrong column width in complete mode on Windows
+* Remove the dependency on "golang.org/x/crypto/ssh/terminal"
+
+### 1.3 - 2016-05-09
+
+* [#38][38] Add SetChildren to the prefix completer interface
+* [#42][42] Improve multi-line compatibility
+* [#43][43] Remove the sub-package (runes) for gopkg compatibility
+* [#46][46] Auto-complete lines with a space prefix
+* [#48][48] Support suspending the process (Ctrl+Z)
+* [#49][49] Fix a bug when checking equality with the previous command
+* [#53][53] Fix a bug which caused an integer-divide-by-zero panic when the input buffer is empty
+
+### 1.2 - 2016-03-05
+
+* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), written by [@sahib](https://github.com/sahib)
+* [#23][23], support stdin remapping
+* [#27][27], add a `UniqueEditLine` to `Config`, which erases the editing line after the user submits it; usually used in IM.
+* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL statement across multiple lines.
+* Works even when stdin/stdout is not a tty.
+* Add new simple APIs for a single instance, see [std.go](https://github.com/chzyer/readline/blob/master/std.go). History needs to be saved manually when using these APIs.
+* [#28][28], fix history not working as expected.
+* [#33][33], vim mode now supports `c`, `d`, `x (delete character)`, `r (replace character)`
+
+### 1.1 - 2015-11-20
+
+* [#12][12] Add support for key ``/``/``
+* Only enter raw mode as needed (when calling `Readline()`); the program will receive signals (e.g. Ctrl+C) when not interacting with `readline`.
+* Bugs fixed for `PrefixCompleter`
+* Pressing `Ctrl+D` on an empty line causes `io.EOF` as the error; pressing `Ctrl+C` at any time causes `ErrInterrupt` instead of `io.EOF`, which provides a shell-like user experience.
+* Customizable Interrupt/EOF prompt in `Config`
+* [#17][17] Change the atomic package to use 32-bit functions so it can run on 32-bit ARM devices
+* Provide a new password user experience (`readline.ReadPasswordEx()`).
+
+### 1.0 - 2015-10-14
+
+* Initial public release.
+
+[12]: https://github.com/chzyer/readline/pull/12
+[17]: https://github.com/chzyer/readline/pull/17
+[23]: https://github.com/chzyer/readline/pull/23
+[27]: https://github.com/chzyer/readline/pull/27
+[28]: https://github.com/chzyer/readline/pull/28
+[33]: https://github.com/chzyer/readline/pull/33
+[38]: https://github.com/chzyer/readline/pull/38
+[42]: https://github.com/chzyer/readline/pull/42
+[43]: https://github.com/chzyer/readline/pull/43
+[46]: https://github.com/chzyer/readline/pull/46
+[48]: https://github.com/chzyer/readline/pull/48
+[49]: https://github.com/chzyer/readline/pull/49
+[53]: https://github.com/chzyer/readline/pull/53
+[60]: https://github.com/chzyer/readline/pull/60
diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE
new file mode 100644
index 00000000000..c9afab3dcd0
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Chzyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
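Before the readline README below, one note on the cespare/xxhash/v2 changes earlier in this section: the amd64 assembly was moved off CX/R15 onto SI/DI, and the string helpers were rewritten around the private sliceHeader type so the compiler can inline them. A minimal sketch of the public API those files implement (illustrative only, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing; on amd64 this dispatches to the Sum64 assembly
	// that this patch reallocates onto SI/DI.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))

	// Sum64String skips the []byte copy via the sliceHeader conversion in
	// xxhash_unsafe.go and is now cheap enough to be inlined.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming API: Digest implements hash.Hash64, and WriteString
	// always returns (len(s), nil) as its doc comment promises.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}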
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
new file mode 100644
index 00000000000..fab974b7f34
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/README.md
@@ -0,0 +1,114 @@
+[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline)
+[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md)
+[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases)
+[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline)
+[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers)
+[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors)
+
+A powerful readline library in `Linux` `macOS` `Windows` `Solaris`
+
+## Guide
+
+* [Demo](example/readline-demo/readline-demo.go)
+* [Shortcut](doc/shortcut.md)
+
+## Repos using readline
+
+[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach)
+[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto)
+[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire)
+[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg)
+[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql)
+[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman)
+[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp)
+[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell)
+[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001)
+[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p)
+
+## Feedback
+
+If you have any questions, please submit a GitHub issue; any pull request is welcome :)
+
+* [https://twitter.com/chzyer](https://twitter.com/chzyer)
+* [http://weibo.com/2145262190](http://weibo.com/2145262190)
+
+## Backers
+
+Love Readline? Help me keep it alive by donating funds to cover project expenses!
+[[Become a backer](https://opencollective.com/readline#backer)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Sponsors + +Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go new file mode 100644 index 00000000000..63b908c187a --- /dev/null +++ b/vendor/github.com/chzyer/readline/ansi_windows.go @@ -0,0 +1,249 @@ +// +build windows + +package readline + +import ( + "bufio" + "io" + "strconv" + "strings" + "sync" + "unicode/utf8" + "unsafe" +) + +const ( + _ = uint16(0) + COLOR_FBLUE = 0x0001 + COLOR_FGREEN = 0x0002 + COLOR_FRED = 0x0004 + COLOR_FINTENSITY = 0x0008 + + COLOR_BBLUE = 0x0010 + COLOR_BGREEN = 0x0020 + COLOR_BRED = 0x0040 + COLOR_BINTENSITY = 0x0080 + + COMMON_LVB_UNDERSCORE = 0x8000 + COMMON_LVB_BOLD = 0x0007 +) + +var ColorTableFg = []word{ + 0, // 30: Black + COLOR_FRED, // 31: Red + COLOR_FGREEN, // 32: Green + COLOR_FRED | COLOR_FGREEN, // 33: Yellow + COLOR_FBLUE, // 34: Blue + COLOR_FRED | COLOR_FBLUE, // 35: Magenta + COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan + COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White +} + +var ColorTableBg = []word{ + 0, // 40: Black + COLOR_BRED, // 41: Red + COLOR_BGREEN, // 42: Green + COLOR_BRED | COLOR_BGREEN, // 43: Yellow + COLOR_BBLUE, // 44: Blue + COLOR_BRED | COLOR_BBLUE, // 45: Magenta + COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan + COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White +} + +type ANSIWriter struct { + target io.Writer + wg sync.WaitGroup + ctx *ANSIWriterCtx + sync.Mutex +} + +func NewANSIWriter(w io.Writer) *ANSIWriter { + a := &ANSIWriter{ + target: w, + ctx: NewANSIWriterCtx(w), + } + return a +} + +func (a *ANSIWriter) Close() error { + a.wg.Wait() + return nil +} + +type ANSIWriterCtx struct { + isEsc bool + isEscSeq bool + arg []string + target *bufio.Writer + wantFlush bool +} + +func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { + return &ANSIWriterCtx{ + target: bufio.NewWriter(target), + } +} + +func (a *ANSIWriterCtx) Flush() { + a.target.Flush() +} + +func (a *ANSIWriterCtx) process(r rune) bool { + if a.wantFlush { + if r == 0 || r == CharEsc { + a.wantFlush = false + a.target.Flush() + } + } + if a.isEscSeq { + a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) + return true + } + + switch r { + case CharEsc: + a.isEsc = true + case '[': + if a.isEsc { + a.arg = nil + a.isEscSeq = true + a.isEsc = false + break + } + fallthrough + default: + a.target.WriteRune(r) + a.wantFlush = true + } + return true +} + +func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { + arg := *argptr + var err error + + if r >= 'A' && r <= 'D' { + count := short(GetInt(arg, 1)) + info, err := GetConsoleScreenBufferInfo() + if err != nil { + return false + } + switch r { + case 'A': // up + info.dwCursorPosition.y -= count + case 'B': // down + info.dwCursorPosition.y += count + case 'C': // right + info.dwCursorPosition.x += count + case 'D': // left + info.dwCursorPosition.x -= count + } + SetConsoleCursorPosition(&info.dwCursorPosition) + return false + } + + switch r { + case 'J': + killLines() + case 'K': + eraseLine() + case 'm': + color := word(0) + for _, item := range arg { + var c int + c, err = strconv.Atoi(item) + if err != nil { + w.WriteString("[" + strings.Join(arg, ";") + "m") + break + } + if c 
>= 30 && c < 40 { + color ^= COLOR_FINTENSITY + color |= ColorTableFg[c-30] + } else if c >= 40 && c < 50 { + color ^= COLOR_BINTENSITY + color |= ColorTableBg[c-40] + } else if c == 4 { + color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] + } else if c == 1 { + color |= COMMON_LVB_BOLD | COLOR_FINTENSITY + } else { // unknown code treat as reset + color = ColorTableFg[7] + } + } + if err != nil { + break + } + kernel.SetConsoleTextAttribute(stdout, uintptr(color)) + case '\007': // set title + case ';': + if len(arg) == 0 || arg[len(arg)-1] != "" { + arg = append(arg, "") + *argptr = arg + } + return true + default: + if len(arg) == 0 { + arg = append(arg, "") + } + arg[len(arg)-1] += string(r) + *argptr = arg + return true + } + *argptr = nil + return false +} + +func (a *ANSIWriter) Write(b []byte) (int, error) { + a.Lock() + defer a.Unlock() + + off := 0 + for len(b) > off { + r, size := utf8.DecodeRune(b[off:]) + if size == 0 { + return off, io.ErrShortWrite + } + off += size + a.ctx.process(r) + } + a.ctx.Flush() + return off, nil +} + +func killLines() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x + size += sbi.dwCursorPosition.x + + var written int + kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} + +func eraseLine() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := sbi.dwSize.x + sbi.dwCursorPosition.x = 0 + var written int + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go new file mode 100644 index 00000000000..c08c994141e --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete.go @@ -0,0 +1,285 @@ +package readline + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +type AutoCompleter interface { + // Readline will pass the whole line and current offset to it + // Completer need to pass all the candidates, and how long they shared the same characters in line + // Example: + // [go, git, git-shell, grep] + // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1 + // Do("gi", 2) => ["t", "t-shell"], 2 + // Do("git", 3) => ["", "-shell"], 3 + Do(line []rune, pos int) (newLine [][]rune, length int) +} + +type TabCompleter struct{} + +func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { + return [][]rune{[]rune("\t")}, 0 +} + +type opCompleter struct { + w io.Writer + op *Operation + width int + + inCompleteMode bool + inSelectMode bool + candidate [][]rune + candidateSource []rune + candidateOff int + candidateChoise int + candidateColNum int +} + +func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { + return &opCompleter{ + w: w, + op: op, + width: width, + } +} + +func (o *opCompleter) doSelect() { + if len(o.candidate) == 1 { + o.op.buf.WriteRunes(o.candidate[0]) + o.ExitCompleteMode(false) + return + } + o.nextCandidate(1) + o.CompleteRefresh() +} + +func (o *opCompleter) nextCandidate(i int) { + o.candidateChoise += i + o.candidateChoise = o.candidateChoise % len(o.candidate) + if o.candidateChoise < 0 { + 
o.candidateChoise = len(o.candidate) + o.candidateChoise + } +} + +func (o *opCompleter) OnComplete() bool { + if o.width == 0 { + return false + } + if o.IsInCompleteSelectMode() { + o.doSelect() + return true + } + + buf := o.op.buf + rs := buf.Runes() + + if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { + o.EnterCompleteSelectMode() + o.doSelect() + return true + } + + o.ExitCompleteSelectMode() + o.candidateSource = rs + newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) + if len(newLines) == 0 { + o.ExitCompleteMode(false) + return true + } + + // only Aggregate candidates in non-complete mode + if !o.IsInCompleteMode() { + if len(newLines) == 1 { + buf.WriteRunes(newLines[0]) + o.ExitCompleteMode(false) + return true + } + + same, size := runes.Aggregate(newLines) + if size > 0 { + buf.WriteRunes(same) + o.ExitCompleteMode(false) + return true + } + } + + o.EnterCompleteMode(offset, newLines) + return true +} + +func (o *opCompleter) IsInCompleteSelectMode() bool { + return o.inSelectMode +} + +func (o *opCompleter) IsInCompleteMode() bool { + return o.inCompleteMode +} + +func (o *opCompleter) HandleCompleteSelect(r rune) bool { + next := true + switch r { + case CharEnter, CharCtrlJ: + next = false + o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) + o.ExitCompleteMode(false) + case CharLineStart: + num := o.candidateChoise % o.candidateColNum + o.nextCandidate(-num) + case CharLineEnd: + num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 + o.candidateChoise += num + if o.candidateChoise >= len(o.candidate) { + o.candidateChoise = len(o.candidate) - 1 + } + case CharBackspace: + o.ExitCompleteSelectMode() + next = false + case CharTab, CharForward: + o.doSelect() + case CharBell, CharInterrupt: + o.ExitCompleteMode(true) + next = false + case CharNext: + tmpChoise := o.candidateChoise + o.candidateColNum + if tmpChoise >= o.getMatrixSize() { + tmpChoise -= o.getMatrixSize() + } else if tmpChoise >= len(o.candidate) { + tmpChoise += o.candidateColNum + tmpChoise -= o.getMatrixSize() + } + o.candidateChoise = tmpChoise + case CharBackward: + o.nextCandidate(-1) + case CharPrev: + tmpChoise := o.candidateChoise - o.candidateColNum + if tmpChoise < 0 { + tmpChoise += o.getMatrixSize() + if tmpChoise >= len(o.candidate) { + tmpChoise -= o.candidateColNum + } + } + o.candidateChoise = tmpChoise + default: + next = false + o.ExitCompleteSelectMode() + } + if next { + o.CompleteRefresh() + return true + } + return false +} + +func (o *opCompleter) getMatrixSize() int { + line := len(o.candidate) / o.candidateColNum + if len(o.candidate)%o.candidateColNum != 0 { + line++ + } + return line * o.candidateColNum +} + +func (o *opCompleter) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opCompleter) CompleteRefresh() { + if !o.inCompleteMode { + return + } + lineCnt := o.op.buf.CursorLineCount() + colWidth := 0 + for _, c := range o.candidate { + w := runes.WidthAll(c) + if w > colWidth { + colWidth = w + } + } + colWidth += o.candidateOff + 1 + same := o.op.buf.RuneSlice(-o.candidateOff) + + // -1 to avoid reach the end of line + width := o.width - 1 + colNum := width / colWidth + if colNum != 0 { + colWidth += (width - (colWidth * colNum)) / colNum + } + + o.candidateColNum = colNum + buf := bufio.NewWriter(o.w) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + + colIdx := 0 + lines := 1 + buf.WriteString("\033[J") + for idx, c := range o.candidate { + inSelect := idx == o.candidateChoise && 
o.IsInCompleteSelectMode() + if inSelect { + buf.WriteString("\033[30;47m") + } + buf.WriteString(string(same)) + buf.WriteString(string(c)) + buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same))) + + if inSelect { + buf.WriteString("\033[0m") + } + + colIdx++ + if colIdx == colNum { + buf.WriteString("\n") + lines++ + colIdx = 0 + } + } + + // move back + fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) + fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) + buf.Flush() +} + +func (o *opCompleter) aggCandidate(candidate [][]rune) int { + offset := 0 + for i := 0; i < len(candidate[0]); i++ { + for j := 0; j < len(candidate)-1; j++ { + if i > len(candidate[j]) { + goto aggregate + } + if candidate[j][i] != candidate[j+1][i] { + goto aggregate + } + } + offset = i + } +aggregate: + return offset +} + +func (o *opCompleter) EnterCompleteSelectMode() { + o.inSelectMode = true + o.candidateChoise = -1 + o.CompleteRefresh() +} + +func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { + o.inCompleteMode = true + o.candidate = candidate + o.candidateOff = offset + o.CompleteRefresh() +} + +func (o *opCompleter) ExitCompleteSelectMode() { + o.inSelectMode = false + o.candidate = nil + o.candidateChoise = -1 + o.candidateOff = -1 + o.candidateSource = nil +} + +func (o *opCompleter) ExitCompleteMode(revent bool) { + o.inCompleteMode = false + o.ExitCompleteSelectMode() +} diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go new file mode 100644 index 00000000000..58d724872bf --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_helper.go @@ -0,0 +1,165 @@ +package readline + +import ( + "bytes" + "strings" +) + +// Caller type for dynamic completion +type DynamicCompleteFunc func(string) []string + +type PrefixCompleterInterface interface { + Print(prefix string, level int, buf *bytes.Buffer) + Do(line []rune, pos int) (newLine [][]rune, length int) + GetName() []rune + GetChildren() []PrefixCompleterInterface + SetChildren(children []PrefixCompleterInterface) +} + +type DynamicPrefixCompleterInterface interface { + PrefixCompleterInterface + IsDynamic() bool + GetDynamicNames(line []rune) [][]rune +} + +type PrefixCompleter struct { + Name []rune + Dynamic bool + Callback DynamicCompleteFunc + Children []PrefixCompleterInterface +} + +func (p *PrefixCompleter) Tree(prefix string) string { + buf := bytes.NewBuffer(nil) + p.Print(prefix, 0, buf) + return buf.String() +} + +func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { + if strings.TrimSpace(string(p.GetName())) != "" { + buf.WriteString(prefix) + if level > 0 { + buf.WriteString("├") + buf.WriteString(strings.Repeat("─", (level*4)-2)) + buf.WriteString(" ") + } + buf.WriteString(string(p.GetName()) + "\n") + level++ + } + for _, ch := range p.GetChildren() { + ch.Print(prefix, level, buf) + } +} + +func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { + Print(p, prefix, level, buf) +} + +func (p *PrefixCompleter) IsDynamic() bool { + return p.Dynamic +} + +func (p *PrefixCompleter) GetName() []rune { + return p.Name +} + +func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { + var names = [][]rune{} + for _, name := range p.Callback(string(line)) { + names = append(names, []rune(name+" ")) + } + return names +} + +func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface { + return p.Children +} + +func (p 
*PrefixCompleter) SetChildren(children []PrefixCompleterInterface) { + p.Children = children +} + +func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { + return PcItem("", pc...) +} + +func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { + name += " " + return &PrefixCompleter{ + Name: []rune(name), + Dynamic: false, + Children: pc, + } +} + +func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { + return &PrefixCompleter{ + Callback: callback, + Dynamic: true, + Children: pc, + } +} + +func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { + line = runes.TrimSpaceLeft(line[:pos]) + goNext := false + var lineCompleter PrefixCompleterInterface + for _, child := range p.GetChildren() { + childNames := make([][]rune, 1) + + childDynamic, ok := child.(DynamicPrefixCompleterInterface) + if ok && childDynamic.IsDynamic() { + childNames = childDynamic.GetDynamicNames(origLine) + } else { + childNames[0] = child.GetName() + } + + for _, childName := range childNames { + if len(line) >= len(childName) { + if runes.HasPrefix(line, childName) { + if len(line) == len(childName) { + newLine = append(newLine, []rune{' '}) + } else { + newLine = append(newLine, childName) + } + offset = len(childName) + lineCompleter = child + goNext = true + } + } else { + if runes.HasPrefix(childName, line) { + newLine = append(newLine, childName[len(line):]) + offset = len(line) + lineCompleter = child + } + } + } + } + + if len(newLine) != 1 { + return + } + + tmpLine := make([]rune, 0, len(line)) + for i := offset; i < len(line); i++ { + if line[i] == ' ' { + continue + } + + tmpLine = append(tmpLine, line[i:]...) 
+ return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) + } + + if goNext { + return doInternal(lineCompleter, nil, 0, origLine) + } + return +} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go new file mode 100644 index 00000000000..5ceadd80f97 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_segment.go @@ -0,0 +1,82 @@ +package readline + +type SegmentCompleter interface { + // a + // |- a1 + // |--- a11 + // |- a2 + // b + // input: + // DoTree([], 0) [a, b] + // DoTree([a], 1) [a] + // DoTree([a, ], 0) [a1, a2] + // DoTree([a, a], 1) [a1, a2] + // DoTree([a, a1], 2) [a1] + // DoTree([a, a1, ], 0) [a11] + // DoTree([a, a1, a], 1) [a11] + DoSegment([][]rune, int) [][]rune +} + +type dumpSegmentCompleter struct { + f func([][]rune, int) [][]rune +} + +func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { + return d.f(segment, n) +} + +func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { + return &SegmentComplete{&dumpSegmentCompleter{f}} +} + +func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { + return &SegmentComplete{ + SegmentCompleter: completer, + } +} + +type SegmentComplete struct { + SegmentCompleter +} + +func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { + ret := make([][]rune, 0, len(cands)) + lastSegment := segments[len(segments)-1] + for _, cand := range cands { + if !runes.HasPrefix(cand, lastSegment) { + continue + } + ret = append(ret, cand[len(lastSegment):]) + } + return ret, idx +} + +func SplitSegment(line []rune, pos int) ([][]rune, int) { + segs := [][]rune{} + lastIdx := -1 + line = line[:pos] + pos = 0 + for idx, l := range line { + if l == ' ' { + pos = 0 + segs = append(segs, line[lastIdx+1:idx]) + lastIdx = idx + } else { + pos++ + } + } + segs = append(segs, line[lastIdx+1:]) + return segs, pos +} + +func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { + + segment, idx := SplitSegment(line, pos) + + cands := c.DoSegment(segment, idx) + newLine, offset = RetSegment(segment, cands, idx) + for idx := range newLine { + newLine[idx] = append(newLine[idx], ' ') + } + return newLine, offset +} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go new file mode 100644 index 00000000000..6b17c464baf --- /dev/null +++ b/vendor/github.com/chzyer/readline/history.go @@ -0,0 +1,330 @@ +package readline + +import ( + "bufio" + "container/list" + "fmt" + "os" + "strings" + "sync" +) + +type hisItem struct { + Source []rune + Version int64 + Tmp []rune +} + +func (h *hisItem) Clean() { + h.Source = nil + h.Tmp = nil +} + +type opHistory struct { + cfg *Config + history *list.List + historyVer int64 + current *list.Element + fd *os.File + fdLock sync.Mutex + enable bool +} + +func newOpHistory(cfg *Config) (o *opHistory) { + o = &opHistory{ + cfg: cfg, + history: list.New(), + enable: true, + } + return o +} + +func (o *opHistory) Reset() { + o.history = list.New() + o.current = nil +} + +func (o *opHistory) IsHistoryClosed() bool { + o.fdLock.Lock() + defer o.fdLock.Unlock() + return o.fd.Fd() == ^(uintptr(0)) +} + +func (o *opHistory) Init() { + if o.IsHistoryClosed() { + o.initHistory() + } +} + +func (o *opHistory) initHistory() { + if o.cfg.HistoryFile != "" { + o.historyUpdatePath(o.cfg.HistoryFile) + } +} + +// only called by newOpHistory +func (o *opHistory) historyUpdatePath(path string) { + 
o.fdLock.Lock() + defer o.fdLock.Unlock() + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return + } + o.fd = f + r := bufio.NewReader(o.fd) + total := 0 + for ; ; total++ { + line, err := r.ReadString('\n') + if err != nil { + break + } + // ignore the empty line + line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } + o.Push([]rune(line)) + o.Compact() + } + if total > o.cfg.HistoryLimit { + o.rewriteLocked() + } + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Compact() { + for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { + o.history.Remove(o.history.Front()) + } +} + +func (o *opHistory) Rewrite() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + o.rewriteLocked() +} + +func (o *opHistory) rewriteLocked() { + if o.cfg.HistoryFile == "" { + return + } + + tmpFile := o.cfg.HistoryFile + ".tmp" + fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) + if err != nil { + return + } + + buf := bufio.NewWriter(fd) + for elem := o.history.Front(); elem != nil; elem = elem.Next() { + buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n") + } + buf.Flush() + + // replace history file + if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { + fd.Close() + return + } + + if o.fd != nil { + o.fd.Close() + } + // fd is write only, just satisfy what we need. + o.fd = fd +} + +func (o *opHistory) Close() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + if o.fd != nil { + o.fd.Close() + } +} + +func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Prev() { + item := o.showItem(elem.Value) + if isNewSearch { + start += len(rs) + } + if elem == o.current { + if len(item) >= start { + item = item[:start] + } + } + idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Next() { + item := o.showItem(elem.Value) + if isNewSearch { + start -= len(rs) + if start < 0 { + start = 0 + } + } + if elem == o.current { + if len(item)-1 >= start { + item = item[start:] + } else { + continue + } + } + idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + if elem == o.current { + idx += start + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) showItem(obj interface{}) []rune { + item := obj.(*hisItem) + if item.Version == o.historyVer { + return item.Tmp + } + return item.Source +} + +func (o *opHistory) Prev() []rune { + if o.current == nil { + return nil + } + current := o.current.Prev() + if current == nil { + return nil + } + o.current = current + return runes.Copy(o.showItem(current.Value)) +} + +func (o *opHistory) Next() ([]rune, bool) { + if o.current == nil { + return nil, false + } + current := o.current.Next() + if current == nil { + return nil, false + } + + o.current = current + return runes.Copy(o.showItem(current.Value)), true +} + +// Disable the current history +func (o *opHistory) Disable() { + o.enable = false +} + +// Enable the current history +func (o *opHistory) Enable() { + o.enable = true +} + +func (o *opHistory) debug() { + Debug("-------") + for item := o.history.Front(); item != nil; item = item.Next() { + Debug(fmt.Sprintf("%+v", item.Value)) + } +} + +// save history +func (o *opHistory) New(current 
[]rune) (err error) { + + // history deactivated + if !o.enable { + return nil + } + + current = runes.Copy(current) + + // if just use last command without modify + // just clean lastest history + if back := o.history.Back(); back != nil { + prev := back.Prev() + if prev != nil { + if runes.Equal(current, prev.Value.(*hisItem).Source) { + o.current = o.history.Back() + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + } + + if len(current) == 0 { + o.current = o.history.Back() + if o.current != nil { + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + + if o.current != o.history.Back() { + // move history item to current command + currentItem := o.current.Value.(*hisItem) + // set current to last item + o.current = o.history.Back() + + current = runes.Copy(currentItem.Tmp) + } + + // err only can be a IO error, just report + err = o.Update(current, true) + + // push a new one to commit current command + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Revert() { + o.historyVer++ + o.current = o.history.Back() +} + +func (o *opHistory) Update(s []rune, commit bool) (err error) { + o.fdLock.Lock() + defer o.fdLock.Unlock() + s = runes.Copy(s) + if o.current == nil { + o.Push(s) + o.Compact() + return + } + r := o.current.Value.(*hisItem) + r.Version = o.historyVer + if commit { + r.Source = s + if o.fd != nil { + // just report the error + _, err = o.fd.Write([]byte(string(r.Source) + "\n")) + } + } else { + r.Tmp = append(r.Tmp[:0], s...) + } + o.current.Value = r + o.Compact() + return +} + +func (o *opHistory) Push(s []rune) { + s = runes.Copy(s) + elem := o.history.PushBack(&hisItem{Source: s}) + o.current = elem +} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go new file mode 100644 index 00000000000..4c31624f806 --- /dev/null +++ b/vendor/github.com/chzyer/readline/operation.go @@ -0,0 +1,531 @@ +package readline + +import ( + "errors" + "io" + "sync" +) + +var ( + ErrInterrupt = errors.New("Interrupt") +) + +type InterruptError struct { + Line []rune +} + +func (*InterruptError) Error() string { + return "Interrupted" +} + +type Operation struct { + m sync.Mutex + cfg *Config + t *Terminal + buf *RuneBuffer + outchan chan []rune + errchan chan error + w io.Writer + + history *opHistory + *opSearch + *opCompleter + *opPassword + *opVim +} + +func (o *Operation) SetBuffer(what string) { + o.buf.Set([]rune(what)) +} + +type wrapWriter struct { + r *Operation + t *Terminal + target io.Writer +} + +func (w *wrapWriter) Write(b []byte) (int, error) { + if !w.t.IsReading() { + return w.target.Write(b) + } + + var ( + n int + err error + ) + w.r.buf.Refresh(func() { + n, err = w.target.Write(b) + }) + + if w.r.IsSearchMode() { + w.r.SearchRefresh(-1) + } + if w.r.IsInCompleteMode() { + w.r.CompleteRefresh() + } + return n, err +} + +func NewOperation(t *Terminal, cfg *Config) *Operation { + width := cfg.FuncGetWidth() + op := &Operation{ + t: t, + buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), + outchan: make(chan []rune), + errchan: make(chan error, 1), + } + op.w = op.buf.w + op.SetConfig(cfg) + op.opVim = newVimMode(op) + op.opCompleter = newOpCompleter(op.buf.w, op, width) + op.opPassword = newOpPassword(op) + op.cfg.FuncOnWidthChanged(func() { + newWidth := cfg.FuncGetWidth() + op.opCompleter.OnWidthChange(newWidth) + op.opSearch.OnWidthChange(newWidth) + op.buf.OnWidthChange(newWidth) + }) + go op.ioloop() + return op +} + +func (o *Operation) 
SetPrompt(s string) { + o.buf.SetPrompt(s) +} + +func (o *Operation) SetMaskRune(r rune) { + o.buf.SetMask(r) +} + +func (o *Operation) GetConfig() *Config { + o.m.Lock() + cfg := *o.cfg + o.m.Unlock() + return &cfg +} + +func (o *Operation) ioloop() { + for { + keepInSearchMode := false + keepInCompleteMode := false + r := o.t.ReadRune() + if o.GetConfig().FuncFilterInputRune != nil { + var process bool + r, process = o.GetConfig().FuncFilterInputRune(r) + if !process { + o.buf.Refresh(nil) // to refresh the line + continue // ignore this rune + } + } + + if r == 0 { // io.EOF + if o.buf.Len() == 0 { + o.buf.Clean() + select { + case o.errchan <- io.EOF: + } + break + } else { + // if stdin got io.EOF and there is something left in buffer, + // let's flush them by sending CharEnter. + // And we will got io.EOF int next loop. + r = CharEnter + } + } + isUpdateHistory := true + + if o.IsInCompleteSelectMode() { + keepInCompleteMode = o.HandleCompleteSelect(r) + if keepInCompleteMode { + continue + } + + o.buf.Refresh(nil) + switch r { + case CharEnter, CharCtrlJ: + o.history.Update(o.buf.Runes(), false) + fallthrough + case CharInterrupt: + o.t.KickRead() + fallthrough + case CharBell: + continue + } + } + + if o.IsEnableVimMode() { + r = o.HandleVim(r, o.t.ReadRune) + if r == 0 { + continue + } + } + + switch r { + case CharBell: + if o.IsSearchMode() { + o.ExitSearchMode(true) + o.buf.Refresh(nil) + } + if o.IsInCompleteMode() { + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + } + case CharTab: + if o.GetConfig().AutoComplete == nil { + o.t.Bell() + break + } + if o.OnComplete() { + keepInCompleteMode = true + } else { + o.t.Bell() + break + } + + case CharBckSearch: + if !o.SearchMode(S_DIR_BCK) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharCtrlU: + o.buf.KillFront() + case CharFwdSearch: + if !o.SearchMode(S_DIR_FWD) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharKill: + o.buf.Kill() + keepInCompleteMode = true + case MetaForward: + o.buf.MoveToNextWord() + case CharTranspose: + o.buf.Transpose() + case MetaBackward: + o.buf.MoveToPrevWord() + case MetaDelete: + o.buf.DeleteWord() + case CharLineStart: + o.buf.MoveToLineStart() + case CharLineEnd: + o.buf.MoveToLineEnd() + case CharBackspace, CharCtrlH: + if o.IsSearchMode() { + o.SearchBackspace() + keepInSearchMode = true + break + } + + if o.buf.Len() == 0 { + o.t.Bell() + break + } + o.buf.Backspace() + if o.IsInCompleteMode() { + o.OnComplete() + } + case CharCtrlZ: + o.buf.Clean() + o.t.SleepToResume() + o.Refresh() + case CharCtrlL: + ClearScreen(o.w) + o.Refresh() + case MetaBackspace, CharCtrlW: + o.buf.BackEscapeWord() + case CharCtrlY: + o.buf.Yank() + case CharEnter, CharCtrlJ: + if o.IsSearchMode() { + o.ExitSearchMode(false) + } + o.buf.MoveToLineEnd() + var data []rune + if !o.GetConfig().UniqueEditLine { + o.buf.WriteRune('\n') + data = o.buf.Reset() + data = data[:len(data)-1] // trim \n + } else { + o.buf.Clean() + data = o.buf.Reset() + } + o.outchan <- data + if !o.GetConfig().DisableAutoSaveHistory { + // ignore IO error + _ = o.history.New(data) + } else { + isUpdateHistory = false + } + case CharBackward: + o.buf.MoveBackward() + case CharForward: + o.buf.MoveForward() + case CharPrev: + buf := o.history.Prev() + if buf != nil { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharNext: + buf, ok := o.history.Next() + if ok { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharDelete: + if o.buf.Len() > 0 || !o.IsNormalMode() { + o.t.KickRead() + if !o.buf.Delete() { + 
o.t.Bell() + } + break + } + + // treat as EOF + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(o.GetConfig().EOFPrompt + "\n") + } + o.buf.Reset() + isUpdateHistory = false + o.history.Revert() + o.errchan <- io.EOF + if o.GetConfig().UniqueEditLine { + o.buf.Clean() + } + case CharInterrupt: + if o.IsSearchMode() { + o.t.KickRead() + o.ExitSearchMode(true) + break + } + if o.IsInCompleteMode() { + o.t.KickRead() + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + break + } + o.buf.MoveToLineEnd() + o.buf.Refresh(nil) + hint := o.GetConfig().InterruptPrompt + "\n" + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(hint) + } + remain := o.buf.Reset() + if !o.GetConfig().UniqueEditLine { + remain = remain[:len(remain)-len([]rune(hint))] + } + isUpdateHistory = false + o.history.Revert() + o.errchan <- &InterruptError{remain} + default: + if o.IsSearchMode() { + o.SearchChar(r) + keepInSearchMode = true + break + } + o.buf.WriteRune(r) + if o.IsInCompleteMode() { + o.OnComplete() + keepInCompleteMode = true + } + } + + listener := o.GetConfig().Listener + if listener != nil { + newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) + if ok { + o.buf.SetWithIdx(newPos, newLine) + } + } + + o.m.Lock() + if !keepInSearchMode && o.IsSearchMode() { + o.ExitSearchMode(false) + o.buf.Refresh(nil) + } else if o.IsInCompleteMode() { + if !keepInCompleteMode { + o.ExitCompleteMode(false) + o.Refresh() + } else { + o.buf.Refresh(nil) + o.CompleteRefresh() + } + } + if isUpdateHistory && !o.IsSearchMode() { + // it will cause null history + o.history.Update(o.buf.Runes(), false) + } + o.m.Unlock() + } +} + +func (o *Operation) Stderr() io.Writer { + return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t} +} + +func (o *Operation) Stdout() io.Writer { + return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t} +} + +func (o *Operation) String() (string, error) { + r, err := o.Runes() + return string(r), err +} + +func (o *Operation) Runes() ([]rune, error) { + o.t.EnterRawMode() + defer o.t.ExitRawMode() + + listener := o.GetConfig().Listener + if listener != nil { + listener.OnChange(nil, 0, 0) + } + + o.buf.Refresh(nil) // print prompt + o.t.KickRead() + select { + case r := <-o.outchan: + return r, nil + case err := <-o.errchan: + if e, ok := err.(*InterruptError); ok { + return e.Line, ErrInterrupt + } + return nil, err + } +} + +func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { + cfg := o.GenPasswordConfig() + cfg.Prompt = prompt + cfg.Listener = l + return o.PasswordWithConfig(cfg) +} + +func (o *Operation) GenPasswordConfig() *Config { + return o.opPassword.PasswordConfig() +} + +func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { + if err := o.opPassword.EnterPasswordMode(cfg); err != nil { + return nil, err + } + defer o.opPassword.ExitPasswordMode() + return o.Slice() +} + +func (o *Operation) Password(prompt string) ([]byte, error) { + return o.PasswordEx(prompt, nil) +} + +func (o *Operation) SetTitle(t string) { + o.w.Write([]byte("\033[2;" + t + "\007")) +} + +func (o *Operation) Slice() ([]byte, error) { + r, err := o.Runes() + if err != nil { + return nil, err + } + return []byte(string(r)), nil +} + +func (o *Operation) Close() { + o.history.Close() +} + +func (o *Operation) SetHistoryPath(path string) { + if o.history != nil { + o.history.Close() + } + o.cfg.HistoryFile = path + o.history = newOpHistory(o.cfg) +} + +func (o *Operation) IsNormalMode() bool { + return !o.IsInCompleteMode() && 
!o.IsSearchMode() +} + +func (op *Operation) SetConfig(cfg *Config) (*Config, error) { + op.m.Lock() + defer op.m.Unlock() + if op.cfg == cfg { + return op.cfg, nil + } + if err := cfg.Init(); err != nil { + return op.cfg, err + } + old := op.cfg + op.cfg = cfg + op.SetPrompt(cfg.Prompt) + op.SetMaskRune(cfg.MaskRune) + op.buf.SetConfig(cfg) + width := op.cfg.FuncGetWidth() + + if cfg.opHistory == nil { + op.SetHistoryPath(cfg.HistoryFile) + cfg.opHistory = op.history + cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) + } + op.history = cfg.opHistory + + // SetHistoryPath will close opHistory which already exists + // so if we use it next time, we need to reopen it by `InitHistory()` + op.history.Init() + + if op.cfg.AutoComplete != nil { + op.opCompleter = newOpCompleter(op.buf.w, op, width) + } + + op.opSearch = cfg.opSearch + return old, nil +} + +func (o *Operation) ResetHistory() { + o.history.Reset() +} + +// if err is not nil, it just mean it fail to write to file +// other things goes fine. +func (o *Operation) SaveHistory(content string) error { + return o.history.New([]rune(content)) +} + +func (o *Operation) Refresh() { + if o.t.IsReading() { + o.buf.Refresh(nil) + } +} + +func (o *Operation) Clean() { + o.buf.Clean() +} + +func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { + return &DumpListener{f: f} +} + +type DumpListener struct { + f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + return d.f(line, pos, key) +} + +type Listener interface { + OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +type Painter interface { + Paint(line []rune, pos int) []rune +} + +type defaultPainter struct{} + +func (p *defaultPainter) Paint(line []rune, _ int) []rune { + return line +} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go new file mode 100644 index 00000000000..414288c2a50 --- /dev/null +++ b/vendor/github.com/chzyer/readline/password.go @@ -0,0 +1,33 @@ +package readline + +type opPassword struct { + o *Operation + backupCfg *Config +} + +func newOpPassword(o *Operation) *opPassword { + return &opPassword{o: o} +} + +func (o *opPassword) ExitPasswordMode() { + o.o.SetConfig(o.backupCfg) + o.backupCfg = nil +} + +func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { + o.backupCfg, err = o.o.SetConfig(cfg) + return +} + +func (o *opPassword) PasswordConfig() *Config { + return &Config{ + EnableMask: true, + InterruptPrompt: "\n", + EOFPrompt: "\n", + HistoryLimit: -1, + Painter: &defaultPainter{}, + + Stdout: o.o.cfg.Stdout, + Stderr: o.o.cfg.Stderr, + } +} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go new file mode 100644 index 00000000000..073ef150a59 --- /dev/null +++ b/vendor/github.com/chzyer/readline/rawreader_windows.go @@ -0,0 +1,125 @@ +// +build windows + +package readline + +import "unsafe" + +const ( + VK_CANCEL = 0x03 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_ESCAPE = 0x1B + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_DELETE = 0x2E + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 +) + +// RawReader translate input record to ANSI escape 
sequence. +// To provides same behavior as unix terminal. +type RawReader struct { + ctrlKey bool + altKey bool +} + +func NewRawReader() *RawReader { + r := new(RawReader) + return r +} + +// only process one action in one read +func (r *RawReader) Read(buf []byte) (int, error) { + ir := new(_INPUT_RECORD) + var read int + var err error +next: + err = kernel.ReadConsoleInputW(stdin, + uintptr(unsafe.Pointer(ir)), + 1, + uintptr(unsafe.Pointer(&read)), + ) + if err != nil { + return 0, err + } + if ir.EventType != EVENT_KEY { + goto next + } + ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0])) + if ker.bKeyDown == 0 { // keyup + if r.ctrlKey || r.altKey { + switch ker.wVirtualKeyCode { + case VK_RCONTROL, VK_LCONTROL: + r.ctrlKey = false + case VK_MENU: //alt + r.altKey = false + } + } + goto next + } + + if ker.unicodeChar == 0 { + var target rune + switch ker.wVirtualKeyCode { + case VK_RCONTROL, VK_LCONTROL: + r.ctrlKey = true + case VK_MENU: //alt + r.altKey = true + case VK_LEFT: + target = CharBackward + case VK_RIGHT: + target = CharForward + case VK_UP: + target = CharPrev + case VK_DOWN: + target = CharNext + } + if target != 0 { + return r.write(buf, target) + } + goto next + } + char := rune(ker.unicodeChar) + if r.ctrlKey { + switch char { + case 'A': + char = CharLineStart + case 'E': + char = CharLineEnd + case 'R': + char = CharBckSearch + case 'S': + char = CharFwdSearch + } + } else if r.altKey { + switch char { + case VK_BACK: + char = CharBackspace + } + return r.writeEsc(buf, char) + } + return r.write(buf, char) +} + +func (r *RawReader) writeEsc(b []byte, char rune) (int, error) { + b[0] = '\033' + n := copy(b[1:], []byte(string(char))) + return n + 1, nil +} + +func (r *RawReader) write(b []byte, char rune) (int, error) { + n := copy(b, []byte(string(char))) + return n, nil +} + +func (r *RawReader) Close() error { + return nil +} diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go new file mode 100644 index 00000000000..0e7aca06d5a --- /dev/null +++ b/vendor/github.com/chzyer/readline/readline.go @@ -0,0 +1,326 @@ +// Readline is a pure go implementation for GNU-Readline kind library. 
+// +// example: +// rl, err := readline.New("> ") +// if err != nil { +// panic(err) +// } +// defer rl.Close() +// +// for { +// line, err := rl.Readline() +// if err != nil { // io.EOF +// break +// } +// println(line) +// } +// +package readline + +import "io" + +type Instance struct { + Config *Config + Terminal *Terminal + Operation *Operation +} + +type Config struct { + // prompt supports ANSI escape sequence, so we can color some characters even in windows + Prompt string + + // readline will persist historys to file where HistoryFile specified + HistoryFile string + // specify the max length of historys, it's 500 by default, set it to -1 to disable history + HistoryLimit int + DisableAutoSaveHistory bool + // enable case-insensitive history searching + HistorySearchFold bool + + // AutoCompleter will called once user press TAB + AutoComplete AutoCompleter + + // Any key press will pass to Listener + // NOTE: Listener will be triggered by (nil, 0, 0) immediately + Listener Listener + + Painter Painter + + // If VimMode is true, readline will in vim.insert mode by default + VimMode bool + + InterruptPrompt string + EOFPrompt string + + FuncGetWidth func() int + + Stdin io.ReadCloser + StdinWriter io.Writer + Stdout io.Writer + Stderr io.Writer + + EnableMask bool + MaskRune rune + + // erase the editing line after user submited it + // it use in IM usually. + UniqueEditLine bool + + // filter input runes (may be used to disable CtrlZ or for translating some keys to different actions) + // -> output = new (translated) rune and true/false if continue with processing this one + FuncFilterInputRune func(rune) (rune, bool) + + // force use interactive even stdout is not a tty + FuncIsTerminal func() bool + FuncMakeRaw func() error + FuncExitRaw func() error + FuncOnWidthChanged func(func()) + ForceUseInteractive bool + + // private fields + inited bool + opHistory *opHistory + opSearch *opSearch +} + +func (c *Config) useInteractive() bool { + if c.ForceUseInteractive { + return true + } + return c.FuncIsTerminal() +} + +func (c *Config) Init() error { + if c.inited { + return nil + } + c.inited = true + if c.Stdin == nil { + c.Stdin = NewCancelableStdin(Stdin) + } + + c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin) + + if c.Stdout == nil { + c.Stdout = Stdout + } + if c.Stderr == nil { + c.Stderr = Stderr + } + if c.HistoryLimit == 0 { + c.HistoryLimit = 500 + } + + if c.InterruptPrompt == "" { + c.InterruptPrompt = "^C" + } else if c.InterruptPrompt == "\n" { + c.InterruptPrompt = "" + } + if c.EOFPrompt == "" { + c.EOFPrompt = "^D" + } else if c.EOFPrompt == "\n" { + c.EOFPrompt = "" + } + + if c.AutoComplete == nil { + c.AutoComplete = &TabCompleter{} + } + if c.FuncGetWidth == nil { + c.FuncGetWidth = GetScreenWidth + } + if c.FuncIsTerminal == nil { + c.FuncIsTerminal = DefaultIsTerminal + } + rm := new(RawMode) + if c.FuncMakeRaw == nil { + c.FuncMakeRaw = rm.Enter + } + if c.FuncExitRaw == nil { + c.FuncExitRaw = rm.Exit + } + if c.FuncOnWidthChanged == nil { + c.FuncOnWidthChanged = DefaultOnWidthChanged + } + + return nil +} + +func (c Config) Clone() *Config { + c.opHistory = nil + c.opSearch = nil + return &c +} + +func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) { + c.Listener = FuncListener(f) +} + +func (c *Config) SetPainter(p Painter) { + c.Painter = p +} + +func NewEx(cfg *Config) (*Instance, error) { + t, err := NewTerminal(cfg) + if err != nil { + return nil, err + } + rl := t.Readline() + if cfg.Painter == 
nil { + cfg.Painter = &defaultPainter{} + } + return &Instance{ + Config: cfg, + Terminal: t, + Operation: rl, + }, nil +} + +func New(prompt string) (*Instance, error) { + return NewEx(&Config{Prompt: prompt}) +} + +func (i *Instance) ResetHistory() { + i.Operation.ResetHistory() +} + +func (i *Instance) SetPrompt(s string) { + i.Operation.SetPrompt(s) +} + +func (i *Instance) SetMaskRune(r rune) { + i.Operation.SetMaskRune(r) +} + +// change history persistence in runtime +func (i *Instance) SetHistoryPath(p string) { + i.Operation.SetHistoryPath(p) +} + +// readline will refresh automatic when write through Stdout() +func (i *Instance) Stdout() io.Writer { + return i.Operation.Stdout() +} + +// readline will refresh automatic when write through Stdout() +func (i *Instance) Stderr() io.Writer { + return i.Operation.Stderr() +} + +// switch VimMode in runtime +func (i *Instance) SetVimMode(on bool) { + i.Operation.SetVimMode(on) +} + +func (i *Instance) IsVimMode() bool { + return i.Operation.IsEnableVimMode() +} + +func (i *Instance) GenPasswordConfig() *Config { + return i.Operation.GenPasswordConfig() +} + +// we can generate a config by `i.GenPasswordConfig()` +func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) { + return i.Operation.PasswordWithConfig(cfg) +} + +func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) { + return i.Operation.PasswordEx(prompt, l) +} + +func (i *Instance) ReadPassword(prompt string) ([]byte, error) { + return i.Operation.Password(prompt) +} + +type Result struct { + Line string + Error error +} + +func (l *Result) CanContinue() bool { + return len(l.Line) != 0 && l.Error == ErrInterrupt +} + +func (l *Result) CanBreak() bool { + return !l.CanContinue() && l.Error != nil +} + +func (i *Instance) Line() *Result { + ret, err := i.Readline() + return &Result{ret, err} +} + +// err is one of (nil, io.EOF, readline.ErrInterrupt) +func (i *Instance) Readline() (string, error) { + return i.Operation.String() +} + +func (i *Instance) ReadlineWithDefault(what string) (string, error) { + i.Operation.SetBuffer(what) + return i.Operation.String() +} + +func (i *Instance) SaveHistory(content string) error { + return i.Operation.SaveHistory(content) +} + +// same as readline +func (i *Instance) ReadSlice() ([]byte, error) { + return i.Operation.Slice() +} + +// we must make sure that call Close() before process exit. 
+func (i *Instance) Close() error {
+	if err := i.Terminal.Close(); err != nil {
+		return err
+	}
+	i.Config.Stdin.Close()
+	i.Operation.Close()
+	return nil
+}
+func (i *Instance) Clean() {
+	i.Operation.Clean()
+}
+
+func (i *Instance) Write(b []byte) (int, error) {
+	return i.Stdout().Write(b)
+}
+
+// WriteStdin prefills the next Stdin fetch.
+// The next time you call Readline() this value will be written before the user input,
+// e.g.:
+//	i, _ := readline.New("> ")
+//	i.WriteStdin([]byte("test"))
+//	_, _ = i.Readline()
+//
+// gives
+//
+// > test[cursor]
+func (i *Instance) WriteStdin(val []byte) (int, error) {
+	return i.Terminal.WriteStdin(val)
+}
+
+func (i *Instance) SetConfig(cfg *Config) *Config {
+	if i.Config == cfg {
+		return cfg
+	}
+	old := i.Config
+	i.Config = cfg
+	i.Operation.SetConfig(cfg)
+	i.Terminal.SetConfig(cfg)
+	return old
+}
+
+func (i *Instance) Refresh() {
+	i.Operation.Refresh()
+}
+
+// HistoryDisable disables saving commands into the history
+func (i *Instance) HistoryDisable() {
+	i.Operation.history.Disable()
+}
+
+// HistoryEnable enables saving commands into the history (the default)
+func (i *Instance) HistoryEnable() {
+	i.Operation.history.Enable()
+}
diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go
new file mode 100644
index 00000000000..74dbf569022
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/remote.go
@@ -0,0 +1,475 @@
+package readline
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"sync"
+	"sync/atomic"
+)
+
+type MsgType int16
+
+const (
+	T_DATA = MsgType(iota)
+	T_WIDTH
+	T_WIDTH_REPORT
+	T_ISTTY_REPORT
+	T_RAW
+	T_ERAW // exit raw
+	T_EOF
+)
+
+type RemoteSvr struct {
+	eof           int32
+	closed        int32
+	width         int32
+	reciveChan    chan struct{}
+	writeChan     chan *writeCtx
+	conn          net.Conn
+	isTerminal    bool
+	funcWidthChan func()
+	stopChan      chan struct{}
+
+	dataBufM sync.Mutex
+	dataBuf  bytes.Buffer
+}
+
+type writeReply struct {
+	n   int
+	err error
+}
+
+type writeCtx struct {
+	msg   *Message
+	reply chan *writeReply
+}
+
+func newWriteCtx(msg *Message) *writeCtx {
+	return &writeCtx{
+		msg:   msg,
+		reply: make(chan *writeReply),
+	}
+}
+
+func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) {
+	rs := &RemoteSvr{
+		width:      -1,
+		conn:       conn,
+		writeChan:  make(chan *writeCtx),
+		reciveChan: make(chan struct{}),
+		stopChan:   make(chan struct{}),
+	}
+	buf := bufio.NewReader(rs.conn)
+
+	if err := rs.init(buf); err != nil {
+		return nil, err
+	}
+
+	go rs.readLoop(buf)
+	go rs.writeLoop()
+	return rs, nil
+}
+
+func (r *RemoteSvr) init(buf *bufio.Reader) error {
+	m, err := ReadMessage(buf)
+	if err != nil {
+		return err
+	}
+	// receive isTerminal
+	if m.Type != T_ISTTY_REPORT {
+		return fmt.Errorf("unexpected init message")
+	}
+	r.GotIsTerminal(m.Data)
+
+	// receive width
+	m, err = ReadMessage(buf)
+	if err != nil {
+		return err
+	}
+	if m.Type != T_WIDTH_REPORT {
+		return fmt.Errorf("unexpected init message")
+	}
+	r.GotReportWidth(m.Data)
+
+	return nil
+}
+
+func (r *RemoteSvr) HandleConfig(cfg *Config) {
+	cfg.Stderr = r
+	cfg.Stdout = r
+	cfg.Stdin = r
+	cfg.FuncExitRaw = r.ExitRawMode
+	cfg.FuncIsTerminal = r.IsTerminal
+	cfg.FuncMakeRaw = r.EnterRawMode
+	cfg.FuncExitRaw = r.ExitRawMode
+	cfg.FuncGetWidth = r.GetWidth
+	cfg.FuncOnWidthChanged = func(f func()) {
+		r.funcWidthChan = f
+	}
+}
+
+func (r *RemoteSvr) IsTerminal() bool {
+	return r.isTerminal
+}
+
+func (r *RemoteSvr) checkEOF() error {
+	if atomic.LoadInt32(&r.eof) == 1 {
+		return io.EOF
} + return nil +} + +func (r *RemoteSvr) Read(b []byte) (int, error) { + r.dataBufM.Lock() + n, err := r.dataBuf.Read(b) + r.dataBufM.Unlock() + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + if n == 0 && err == io.EOF { + <-r.reciveChan + r.dataBufM.Lock() + n, err = r.dataBuf.Read(b) + r.dataBufM.Unlock() + } + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + return n, err +} + +func (r *RemoteSvr) writeMsg(m *Message) error { + ctx := newWriteCtx(m) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.err +} + +func (r *RemoteSvr) Write(b []byte) (int, error) { + ctx := newWriteCtx(NewMessage(T_DATA, b)) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.n, reply.err +} + +func (r *RemoteSvr) EnterRawMode() error { + return r.writeMsg(NewMessage(T_RAW, nil)) +} + +func (r *RemoteSvr) ExitRawMode() error { + return r.writeMsg(NewMessage(T_ERAW, nil)) +} + +func (r *RemoteSvr) writeLoop() { + defer r.Close() + +loop: + for { + select { + case ctx, ok := <-r.writeChan: + if !ok { + break + } + n, err := ctx.msg.WriteTo(r.conn) + ctx.reply <- &writeReply{n, err} + case <-r.stopChan: + break loop + } + } +} + +func (r *RemoteSvr) Close() error { + if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { + close(r.stopChan) + r.conn.Close() + } + return nil +} + +func (r *RemoteSvr) readLoop(buf *bufio.Reader) { + defer r.Close() + for { + m, err := ReadMessage(buf) + if err != nil { + break + } + switch m.Type { + case T_EOF: + atomic.StoreInt32(&r.eof, 1) + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_DATA: + r.dataBufM.Lock() + r.dataBuf.Write(m.Data) + r.dataBufM.Unlock() + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_WIDTH_REPORT: + r.GotReportWidth(m.Data) + case T_ISTTY_REPORT: + r.GotIsTerminal(m.Data) + } + } +} + +func (r *RemoteSvr) GotIsTerminal(data []byte) { + if binary.BigEndian.Uint16(data) == 0 { + r.isTerminal = false + } else { + r.isTerminal = true + } +} + +func (r *RemoteSvr) GotReportWidth(data []byte) { + atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) + if r.funcWidthChan != nil { + r.funcWidthChan() + } +} + +func (r *RemoteSvr) GetWidth() int { + return int(atomic.LoadInt32(&r.width)) +} + +// ----------------------------------------------------------------------------- + +type Message struct { + Type MsgType + Data []byte +} + +func ReadMessage(r io.Reader) (*Message, error) { + m := new(Message) + var length int32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { + return nil, err + } + m.Data = make([]byte, int(length)-2) + if _, err := io.ReadFull(r, m.Data); err != nil { + return nil, err + } + return m, nil +} + +func NewMessage(t MsgType, data []byte) *Message { + return &Message{t, data} +} + +func (m *Message) WriteTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) + binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) + binary.Write(buf, binary.BigEndian, m.Type) + buf.Write(m.Data) + n, err := buf.WriteTo(w) + return int(n), err +} + +// ----------------------------------------------------------------------------- + +type RemoteCli struct { + conn net.Conn + raw RawMode + receiveChan chan struct{} + inited int32 + isTerminal *bool + + data bytes.Buffer + dataM sync.Mutex +} + +func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { + r := &RemoteCli{ + conn: conn, + 
receiveChan: make(chan struct{}), + } + return r, nil +} + +func (r *RemoteCli) MarkIsTerminal(is bool) { + r.isTerminal = &is +} + +func (r *RemoteCli) init() error { + if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { + return nil + } + + if err := r.reportIsTerminal(); err != nil { + return err + } + + if err := r.reportWidth(); err != nil { + return err + } + + // register sig for width changed + DefaultOnWidthChanged(func() { + r.reportWidth() + }) + return nil +} + +func (r *RemoteCli) writeMsg(m *Message) error { + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return err +} + +func (r *RemoteCli) Write(b []byte) (int, error) { + m := NewMessage(T_DATA, b) + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return len(b), err +} + +func (r *RemoteCli) reportWidth() error { + screenWidth := GetScreenWidth() + data := make([]byte, 2) + binary.BigEndian.PutUint16(data, uint16(screenWidth)) + msg := NewMessage(T_WIDTH_REPORT, data) + + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) reportIsTerminal() error { + var isTerminal bool + if r.isTerminal != nil { + isTerminal = *r.isTerminal + } else { + isTerminal = DefaultIsTerminal() + } + data := make([]byte, 2) + if isTerminal { + binary.BigEndian.PutUint16(data, 1) + } else { + binary.BigEndian.PutUint16(data, 0) + } + msg := NewMessage(T_ISTTY_REPORT, data) + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) readLoop() { + buf := bufio.NewReader(r.conn) + for { + msg, err := ReadMessage(buf) + if err != nil { + break + } + switch msg.Type { + case T_ERAW: + r.raw.Exit() + case T_RAW: + r.raw.Enter() + case T_DATA: + os.Stdout.Write(msg.Data) + } + } +} + +func (r *RemoteCli) ServeBy(source io.Reader) error { + if err := r.init(); err != nil { + return err + } + + go func() { + defer r.Close() + for { + n, _ := io.Copy(r, source) + if n == 0 { + break + } + } + }() + defer r.raw.Exit() + r.readLoop() + return nil +} + +func (r *RemoteCli) Close() { + r.writeMsg(NewMessage(T_EOF, nil)) +} + +func (r *RemoteCli) Serve() error { + return r.ServeBy(os.Stdin) +} + +func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { + ln, err := net.Listen(n, addr) + if err != nil { + return err + } + if len(onListen) > 0 { + if err := onListen[0](ln); err != nil { + return err + } + } + for { + conn, err := ln.Accept() + if err != nil { + break + } + go func() { + defer conn.Close() + rl, err := HandleConn(*cfg, conn) + if err != nil { + return + } + h(rl) + }() + } + return nil +} + +func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { + r, err := NewRemoteSvr(conn) + if err != nil { + return nil, err + } + r.HandleConfig(&cfg) + + rl, err := NewEx(&cfg) + if err != nil { + return nil, err + } + return rl, nil +} + +func DialRemote(n, addr string) error { + conn, err := net.Dial(n, addr) + if err != nil { + return err + } + defer conn.Close() + + cli, err := NewRemoteCli(conn) + if err != nil { + return err + } + return cli.Serve() +} diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go new file mode 100644 index 00000000000..81d2da50ccb --- /dev/null +++ b/vendor/github.com/chzyer/readline/runebuf.go @@ -0,0 +1,629 @@ +package readline + +import ( + "bufio" + "bytes" + "io" + "strconv" + "strings" + "sync" +) + +type runeBufferBck struct { + buf []rune + idx int +} + +type RuneBuffer struct { + buf 
[]rune + idx int + prompt []rune + w io.Writer + + hadClean bool + interactive bool + cfg *Config + + width int + + bck *runeBufferBck + + offset string + + lastKill []rune + + sync.Mutex +} + +func (r* RuneBuffer) pushKill(text []rune) { + r.lastKill = append([]rune{}, text...) +} + +func (r *RuneBuffer) OnWidthChange(newWidth int) { + r.Lock() + r.width = newWidth + r.Unlock() +} + +func (r *RuneBuffer) Backup() { + r.Lock() + r.bck = &runeBufferBck{r.buf, r.idx} + r.Unlock() +} + +func (r *RuneBuffer) Restore() { + r.Refresh(func() { + if r.bck == nil { + return + } + r.buf = r.bck.buf + r.idx = r.bck.idx + }) +} + +func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { + rb := &RuneBuffer{ + w: w, + interactive: cfg.useInteractive(), + cfg: cfg, + width: width, + } + rb.SetPrompt(prompt) + return rb +} + +func (r *RuneBuffer) SetConfig(cfg *Config) { + r.Lock() + r.cfg = cfg + r.interactive = cfg.useInteractive() + r.Unlock() +} + +func (r *RuneBuffer) SetMask(m rune) { + r.Lock() + r.cfg.MaskRune = m + r.Unlock() +} + +func (r *RuneBuffer) CurrentWidth(x int) int { + r.Lock() + defer r.Unlock() + return runes.WidthAll(r.buf[:x]) +} + +func (r *RuneBuffer) PromptLen() int { + r.Lock() + width := r.promptLen() + r.Unlock() + return width +} + +func (r *RuneBuffer) promptLen() int { + return runes.WidthAll(runes.ColorFilter(r.prompt)) +} + +func (r *RuneBuffer) RuneSlice(i int) []rune { + r.Lock() + defer r.Unlock() + + if i > 0 { + rs := make([]rune, i) + copy(rs, r.buf[r.idx:r.idx+i]) + return rs + } + rs := make([]rune, -i) + copy(rs, r.buf[r.idx+i:r.idx]) + return rs +} + +func (r *RuneBuffer) Runes() []rune { + r.Lock() + newr := make([]rune, len(r.buf)) + copy(newr, r.buf) + r.Unlock() + return newr +} + +func (r *RuneBuffer) Pos() int { + r.Lock() + defer r.Unlock() + return r.idx +} + +func (r *RuneBuffer) Len() int { + r.Lock() + defer r.Unlock() + return len(r.buf) +} + +func (r *RuneBuffer) MoveToLineStart() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx = 0 + }) +} + +func (r *RuneBuffer) MoveBackward() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx-- + }) +} + +func (r *RuneBuffer) WriteString(s string) { + r.WriteRunes([]rune(s)) +} + +func (r *RuneBuffer) WriteRune(s rune) { + r.WriteRunes([]rune{s}) +} + +func (r *RuneBuffer) WriteRunes(s []rune) { + r.Refresh(func() { + tail := append(s, r.buf[r.idx:]...) + r.buf = append(r.buf[:r.idx], tail...) + r.idx += len(s) + }) +} + +func (r *RuneBuffer) MoveForward() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.idx++ + }) +} + +func (r *RuneBuffer) IsCursorInEnd() bool { + r.Lock() + defer r.Unlock() + return r.idx == len(r.buf) +} + +func (r *RuneBuffer) Replace(ch rune) { + r.Refresh(func() { + r.buf[r.idx] = ch + }) +} + +func (r *RuneBuffer) Erase() { + r.Refresh(func() { + r.idx = 0 + r.pushKill(r.buf[:]) + r.buf = r.buf[:0] + }) +} + +func (r *RuneBuffer) Delete() (success bool) { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.pushKill(r.buf[r.idx : r.idx+1]) + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) + success = true + }) + return +} + +func (r *RuneBuffer) DeleteWord() { + if r.idx == len(r.buf) { + return + } + init := r.idx + for init < len(r.buf) && IsWordBreak(r.buf[init]) { + init++ + } + for i := init + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[r.idx:i-1]) + r.Refresh(func() { + r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) 
+ }) + return + } + } + r.Kill() +} + +func (r *RuneBuffer) MoveToPrevWord() (success bool) { + r.Refresh(func() { + if r.idx == 0 { + return + } + + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + success = true + return + } + } + r.idx = 0 + success = true + }) + return +} + +func (r *RuneBuffer) KillFront() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + length := len(r.buf) - r.idx + r.pushKill(r.buf[:r.idx]) + copy(r.buf[:length], r.buf[r.idx:]) + r.idx = 0 + r.buf = r.buf[:length] + }) +} + +func (r *RuneBuffer) Kill() { + r.Refresh(func() { + r.pushKill(r.buf[r.idx:]) + r.buf = r.buf[:r.idx] + }) +} + +func (r *RuneBuffer) Transpose() { + r.Refresh(func() { + if len(r.buf) == 1 { + r.idx++ + } + + if len(r.buf) < 2 { + return + } + + if r.idx == 0 { + r.idx = 1 + } else if r.idx >= len(r.buf) { + r.idx = len(r.buf) - 1 + } + r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] + r.idx++ + }) +} + +func (r *RuneBuffer) MoveToNextWord() { + r.Refresh(func() { + for i := r.idx + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + return + } + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) MoveToEndWord() { + r.Refresh(func() { + // already at the end, so do nothing + if r.idx == len(r.buf) { + return + } + // if we are at the end of a word already, go to next + if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { + r.idx++ + } + + // keep going until at the end of a word + for i := r.idx + 1; i < len(r.buf); i++ { + if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { + r.idx = i - 1 + return + } + } + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) BackEscapeWord() { + r.Refresh(func() { + if r.idx == 0 { + return + } + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[i:r.idx]) + r.buf = append(r.buf[:i], r.buf[r.idx:]...) + r.idx = i + return + } + } + + r.buf = r.buf[:0] + r.idx = 0 + }) +} + +func (r *RuneBuffer) Yank() { + if len(r.lastKill) == 0 { + return + } + r.Refresh(func() { + buf := make([]rune, 0, len(r.buf) + len(r.lastKill)) + buf = append(buf, r.buf[:r.idx]...) + buf = append(buf, r.lastKill...) + buf = append(buf, r.buf[r.idx:]...) + r.buf = buf + r.idx += len(r.lastKill) + }) +} + +func (r *RuneBuffer) Backspace() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + r.idx-- + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) 
+ }) +} + +func (r *RuneBuffer) MoveToLineEnd() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) LineCount(width int) int { + if width == -1 { + width = r.width + } + return LineCount(width, + runes.WidthAll(r.buf)+r.PromptLen()) +} + +func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { + r.Refresh(func() { + if reverse { + for i := r.idx - 1; i >= 0; i-- { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx++ + } + success = true + return + } + } + return + } + for i := r.idx + 1; i < len(r.buf); i++ { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx-- + } + success = true + return + } + } + }) + return +} + +func (r *RuneBuffer) isInLineEdge() bool { + if isWindows { + return false + } + sp := r.getSplitByLine(r.buf) + return len(sp[len(sp)-1]) == 0 +} + +func (r *RuneBuffer) getSplitByLine(rs []rune) []string { + return SplitByLine(r.promptLen(), r.width, rs) +} + +func (r *RuneBuffer) IdxLine(width int) int { + r.Lock() + defer r.Unlock() + return r.idxLine(width) +} + +func (r *RuneBuffer) idxLine(width int) int { + if width == 0 { + return 0 + } + sp := r.getSplitByLine(r.buf[:r.idx]) + return len(sp) - 1 +} + +func (r *RuneBuffer) CursorLineCount() int { + return r.LineCount(r.width) - r.IdxLine(r.width) +} + +func (r *RuneBuffer) Refresh(f func()) { + r.Lock() + defer r.Unlock() + + if !r.interactive { + if f != nil { + f() + } + return + } + + r.clean() + if f != nil { + f() + } + r.print() +} + +func (r *RuneBuffer) SetOffset(offset string) { + r.Lock() + r.offset = offset + r.Unlock() +} + +func (r *RuneBuffer) print() { + r.w.Write(r.output()) + r.hadClean = false +} + +func (r *RuneBuffer) output() []byte { + buf := bytes.NewBuffer(nil) + buf.WriteString(string(r.prompt)) + if r.cfg.EnableMask && len(r.buf) > 0 { + buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) + if r.buf[len(r.buf)-1] == '\n' { + buf.Write([]byte{'\n'}) + } else { + buf.Write([]byte(string(r.cfg.MaskRune))) + } + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + + } else { + for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) { + if e == '\t' { + buf.WriteString(strings.Repeat(" ", TabWidth)) + } else { + buf.WriteRune(e) + } + } + if r.isInLineEdge() { + buf.Write([]byte(" \b")) + } + } + // cursor position + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + return buf.Bytes() +} + +func (r *RuneBuffer) getBackspaceSequence() []byte { + var sep = map[int]bool{} + + var i int + for { + if i >= runes.WidthAll(r.buf) { + break + } + + if i == 0 { + i -= r.promptLen() + } + i += r.width + + sep[i] = true + } + var buf []byte + for i := len(r.buf); i > r.idx; i-- { + // move input to the left of one + buf = append(buf, '\b') + if sep[i] { + // up one line, go to the start of the line and move cursor right to the end (r.width) + buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...) 
+ } + } + + return buf + +} + +func (r *RuneBuffer) Reset() []rune { + ret := runes.Copy(r.buf) + r.buf = r.buf[:0] + r.idx = 0 + return ret +} + +func (r *RuneBuffer) calWidth(m int) int { + if m > 0 { + return runes.WidthAll(r.buf[r.idx : r.idx+m]) + } + return runes.WidthAll(r.buf[r.idx+m : r.idx]) +} + +func (r *RuneBuffer) SetStyle(start, end int, style string) { + if end < start { + panic("end < start") + } + + // goto start + move := start - r.idx + if move > 0 { + r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) + } else { + r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) + } + r.w.Write([]byte("\033[" + style + "m")) + r.w.Write([]byte(string(r.buf[start:end]))) + r.w.Write([]byte("\033[0m")) + // TODO: move back +} + +func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { + r.Refresh(func() { + r.buf = buf + r.idx = idx + }) +} + +func (r *RuneBuffer) Set(buf []rune) { + r.SetWithIdx(len(buf), buf) +} + +func (r *RuneBuffer) SetPrompt(prompt string) { + r.Lock() + r.prompt = []rune(prompt) + r.Unlock() +} + +func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { + buf := bufio.NewWriter(w) + + if r.width == 0 { + buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) + buf.Write([]byte("\033[J")) + } else { + buf.Write([]byte("\033[J")) // just like ^k :) + if idxLine == 0 { + buf.WriteString("\033[2K") + buf.WriteString("\r") + } else { + for i := 0; i < idxLine; i++ { + io.WriteString(buf, "\033[2K\r\033[A") + } + io.WriteString(buf, "\033[2K\r") + } + } + buf.Flush() + return +} + +func (r *RuneBuffer) Clean() { + r.Lock() + r.clean() + r.Unlock() +} + +func (r *RuneBuffer) clean() { + r.cleanWithIdxLine(r.idxLine(r.width)) +} + +func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { + if r.hadClean || !r.interactive { + return + } + r.hadClean = true + r.cleanOutput(r.w, idxLine) +} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go new file mode 100644 index 00000000000..a669bc48c30 --- /dev/null +++ b/vendor/github.com/chzyer/readline/runes.go @@ -0,0 +1,223 @@ +package readline + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +var runes = Runes{} +var TabWidth = 4 + +type Runes struct{} + +func (Runes) EqualRune(a, b rune, fold bool) bool { + if a == b { + return true + } + if !fold { + return false + } + if a > b { + a, b = b, a + } + if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { + if b == a+'a'-'A' { + return true + } + } + return false +} + +func (r Runes) EqualRuneFold(a, b rune) bool { + return r.EqualRune(a, b, true) +} + +func (r Runes) EqualFold(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if r.EqualRuneFold(a[i], b[i]) { + continue + } + return false + } + + return true +} + +func (Runes) Equal(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { + for i := len(r) - len(sub); i >= 0; i-- { + found := true + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +// Search in runes from end to front +func (rs Runes) IndexAllBck(r, sub []rune) int { + return rs.IndexAllBckEx(r, sub, false) +} + +// Search in runes from front to end +func (rs Runes) IndexAll(r, sub []rune) int { + return rs.IndexAllEx(r, sub, false) +} + +func (rs Runes) IndexAllEx(r, sub 
[]rune, fold bool) int { + for i := 0; i < len(r); i++ { + found := true + if len(r[i:]) < len(sub) { + return -1 + } + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +func (Runes) Index(r rune, rs []rune) int { + for i := 0; i < len(rs); i++ { + if rs[i] == r { + return i + } + } + return -1 +} + +func (Runes) ColorFilter(r []rune) []rune { + newr := make([]rune, 0, len(r)) + for pos := 0; pos < len(r); pos++ { + if r[pos] == '\033' && r[pos+1] == '[' { + idx := runes.Index('m', r[pos+2:]) + if idx == -1 { + continue + } + pos += idx + 2 + continue + } + newr = append(newr, r[pos]) + } + return newr +} + +var zeroWidth = []*unicode.RangeTable{ + unicode.Mn, + unicode.Me, + unicode.Cc, + unicode.Cf, +} + +var doubleWidth = []*unicode.RangeTable{ + unicode.Han, + unicode.Hangul, + unicode.Hiragana, + unicode.Katakana, +} + +func (Runes) Width(r rune) int { + if r == '\t' { + return TabWidth + } + if unicode.IsOneOf(zeroWidth, r) { + return 0 + } + if unicode.IsOneOf(doubleWidth, r) { + return 2 + } + return 1 +} + +func (Runes) WidthAll(r []rune) (length int) { + for i := 0; i < len(r); i++ { + length += runes.Width(r[i]) + } + return +} + +func (Runes) Backspace(r []rune) []byte { + return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) +} + +func (Runes) Copy(r []rune) []rune { + n := make([]rune, len(r)) + copy(n, r) + return n +} + +func (Runes) HasPrefixFold(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.EqualFold(r[:len(prefix)], prefix) +} + +func (Runes) HasPrefix(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.Equal(r[:len(prefix)], prefix) +} + +func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) { + for i := 0; i < len(candicate[0]); i++ { + for j := 0; j < len(candicate)-1; j++ { + if i >= len(candicate[j]) || i >= len(candicate[j+1]) { + goto aggregate + } + if candicate[j][i] != candicate[j+1][i] { + goto aggregate + } + } + size = i + 1 + } +aggregate: + if size > 0 { + same = runes.Copy(candicate[0][:size]) + for i := 0; i < len(candicate); i++ { + n := runes.Copy(candicate[i]) + copy(n, n[size:]) + candicate[i] = n[:len(n)-size] + } + } + return +} + +func (Runes) TrimSpaceLeft(in []rune) []rune { + firstIndex := len(in) + for i, r := range in { + if unicode.IsSpace(r) == false { + firstIndex = i + break + } + } + return in[firstIndex:] +} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go new file mode 100644 index 00000000000..52e8ff09953 --- /dev/null +++ b/vendor/github.com/chzyer/readline/search.go @@ -0,0 +1,164 @@ +package readline + +import ( + "bytes" + "container/list" + "fmt" + "io" +) + +const ( + S_STATE_FOUND = iota + S_STATE_FAILING +) + +const ( + S_DIR_BCK = iota + S_DIR_FWD +) + +type opSearch struct { + inMode bool + state int + dir int + source *list.Element + w io.Writer + buf *RuneBuffer + data []rune + history *opHistory + cfg *Config + markStart int + markEnd int + width int +} + +func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { + return &opSearch{ + w: w, + buf: buf, + cfg: cfg, + history: history, + width: width, + } +} + +func (o *opSearch) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opSearch) IsSearchMode() bool { + return o.inMode +} + +func (o *opSearch) SearchBackspace() { + if len(o.data) > 0 { + o.data = 
o.data[:len(o.data)-1]
+		o.search(true)
+	}
+}
+
+func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) {
+	if o.dir == S_DIR_BCK {
+		return o.history.FindBck(isNewSearch, o.data, o.buf.idx)
+	}
+	return o.history.FindFwd(isNewSearch, o.data, o.buf.idx)
+}
+
+func (o *opSearch) search(isChange bool) bool {
+	if len(o.data) == 0 {
+		o.state = S_STATE_FOUND
+		o.SearchRefresh(-1)
+		return true
+	}
+	idx, elem := o.findHistoryBy(isChange)
+	if elem == nil {
+		o.SearchRefresh(-2)
+		return false
+	}
+	o.history.current = elem
+
+	item := o.history.showItem(o.history.current.Value)
+	start, end := 0, 0
+	if o.dir == S_DIR_BCK {
+		start, end = idx, idx+len(o.data)
+	} else {
+		start, end = idx, idx+len(o.data)
+		idx += len(o.data)
+	}
+	o.buf.SetWithIdx(idx, item)
+	o.markStart, o.markEnd = start, end
+	o.SearchRefresh(idx)
+	return true
+}
+
+func (o *opSearch) SearchChar(r rune) {
+	o.data = append(o.data, r)
+	o.search(true)
+}
+
+func (o *opSearch) SearchMode(dir int) bool {
+	if o.width == 0 {
+		return false
+	}
+	alreadyInMode := o.inMode
+	o.inMode = true
+	o.dir = dir
+	o.source = o.history.current
+	if alreadyInMode {
+		o.search(false)
+	} else {
+		o.SearchRefresh(-1)
+	}
+	return true
+}
+
+func (o *opSearch) ExitSearchMode(revert bool) {
+	if revert {
+		o.history.current = o.source
+		o.buf.Set(o.history.showItem(o.history.current.Value))
+	}
+	o.markStart, o.markEnd = 0, 0
+	o.state = S_STATE_FOUND
+	o.inMode = false
+	o.source = nil
+	o.data = nil
+}
+
+func (o *opSearch) SearchRefresh(x int) {
+	if x == -2 {
+		o.state = S_STATE_FAILING
+	} else if x >= 0 {
+		o.state = S_STATE_FOUND
+	}
+	if x < 0 {
+		x = o.buf.idx
+	}
+	x = o.buf.CurrentWidth(x)
+	x += o.buf.PromptLen()
+	x = x % o.width
+
+	if o.markStart > 0 {
+		o.buf.SetStyle(o.markStart, o.markEnd, "4")
+	}
+
+	lineCnt := o.buf.CursorLineCount()
+	buf := bytes.NewBuffer(nil)
+	buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
+	buf.WriteString("\033[J")
+	if o.state == S_STATE_FAILING {
+		buf.WriteString("failing ")
+	}
+	if o.dir == S_DIR_BCK {
+		buf.WriteString("bck")
+	} else if o.dir == S_DIR_FWD {
+		buf.WriteString("fwd")
+	}
+	buf.WriteString("-i-search: ")
+	buf.WriteString(string(o.data))         // keyword
+	buf.WriteString("\033[4m \033[0m")      // _
+	fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev
+	if x > 0 {
+		fmt.Fprintf(buf, "\033[%dC", x) // move forward
+	}
+	o.w.Write(buf.Bytes())
+}
diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go
new file mode 100644
index 00000000000..61d44b75974
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/std.go
@@ -0,0 +1,197 @@
+package readline
+
+import (
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+)
+
+var (
+	Stdin  io.ReadCloser  = os.Stdin
+	Stdout io.WriteCloser = os.Stdout
+	Stderr io.WriteCloser = os.Stderr
+)
+
+var (
+	std     *Instance
+	stdOnce sync.Once
+)
+
+// the global instance will not submit history automatically
+func getInstance() *Instance {
+	stdOnce.Do(func() {
+		std, _ = NewEx(&Config{
+			DisableAutoSaveHistory: true,
+		})
+	})
+	return std
+}
+
+// SetHistoryPath lets readline load history from filepath
+// and try to persist history to disk.
+// Set fp to "" to prevent readline from persisting history to disk,
+// in which case `AddHistory` will return a nil error forever.
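+//
+// A hedged usage sketch (the path below is illustrative only):
+//
+//	readline.SetHistoryPath("/tmp/readline.history")
+//	line, _ := readline.Line("> ")
+//	_ = readline.AddHistory(line)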
+func SetHistoryPath(fp string) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.HistoryFile = fp + ins.SetConfig(cfg) +} + +// set auto completer to global instance +func SetAutoComplete(completer AutoCompleter) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.AutoComplete = completer + ins.SetConfig(cfg) +} + +// add history to global instance manually +// raise error only if `SetHistoryPath` is set with a non-empty path +func AddHistory(content string) error { + ins := getInstance() + return ins.SaveHistory(content) +} + +func Password(prompt string) ([]byte, error) { + ins := getInstance() + return ins.ReadPassword(prompt) +} + +// readline with global configs +func Line(prompt string) (string, error) { + ins := getInstance() + ins.SetPrompt(prompt) + return ins.Readline() +} + +type CancelableStdin struct { + r io.Reader + mutex sync.Mutex + stop chan struct{} + closed int32 + notify chan struct{} + data []byte + read int + err error +} + +func NewCancelableStdin(r io.Reader) *CancelableStdin { + c := &CancelableStdin{ + r: r, + notify: make(chan struct{}), + stop: make(chan struct{}), + } + go c.ioloop() + return c +} + +func (c *CancelableStdin) ioloop() { +loop: + for { + select { + case <-c.notify: + c.read, c.err = c.r.Read(c.data) + select { + case c.notify <- struct{}{}: + case <-c.stop: + break loop + } + case <-c.stop: + break loop + } + } +} + +func (c *CancelableStdin) Read(b []byte) (n int, err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if atomic.LoadInt32(&c.closed) == 1 { + return 0, io.EOF + } + + c.data = b + select { + case c.notify <- struct{}{}: + case <-c.stop: + return 0, io.EOF + } + select { + case <-c.notify: + return c.read, c.err + case <-c.stop: + return 0, io.EOF + } +} + +func (c *CancelableStdin) Close() error { + if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { + close(c.stop) + } + return nil +} + +// FillableStdin is a stdin reader which can prepend some data before +// reading into the real stdin +type FillableStdin struct { + sync.Mutex + stdin io.Reader + stdinBuffer io.ReadCloser + buf []byte + bufErr error +} + +// NewFillableStdin gives you FillableStdin +func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) { + r, w := io.Pipe() + s := &FillableStdin{ + stdinBuffer: r, + stdin: stdin, + } + s.ioloop() + return s, w +} + +func (s *FillableStdin) ioloop() { + go func() { + for { + bufR := make([]byte, 100) + var n int + n, s.bufErr = s.stdinBuffer.Read(bufR) + if s.bufErr != nil { + if s.bufErr == io.ErrClosedPipe { + break + } + } + s.Lock() + s.buf = append(s.buf, bufR[:n]...) 
+ s.Unlock() + } + }() +} + +// Read will read from the local buffer and if no data, read from stdin +func (s *FillableStdin) Read(p []byte) (n int, err error) { + s.Lock() + i := len(s.buf) + if len(p) < i { + i = len(p) + } + if i > 0 { + n := copy(p, s.buf) + s.buf = s.buf[:0] + cerr := s.bufErr + s.bufErr = nil + s.Unlock() + return n, cerr + } + s.Unlock() + n, err = s.stdin.Read(p) + return n, err +} + +func (s *FillableStdin) Close() error { + s.stdinBuffer.Close() + return nil +} diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go new file mode 100644 index 00000000000..b10f91bcb7e --- /dev/null +++ b/vendor/github.com/chzyer/readline/std_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package readline + +func init() { + Stdin = NewRawReader() + Stdout = NewANSIWriter(Stdout) + Stderr = NewANSIWriter(Stderr) +} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go new file mode 100644 index 00000000000..133993ca8ea --- /dev/null +++ b/vendor/github.com/chzyer/readline/term.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := getTermios(fd) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + + if termios, err := getTermios(fd); err != nil { + return nil, err + } else { + oldState.termios = *termios + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + // newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + return &oldState, setTermios(fd, &newState) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := getTermios(fd) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + return setTermios(fd, &state.termios) +} + +// ReadPassword reads a line of input from a terminal without local echo. 
This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + oldState, err := getTermios(fd) + if err != nil { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if err := setTermios(fd, newState); err != nil { + return nil, err + } + + defer func() { + setTermios(fd, oldState) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go new file mode 100644 index 00000000000..68b56ea6ba7 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go new file mode 100644 index 00000000000..e3392b4ac2d --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_linux.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package readline + +import ( + "syscall" + "unsafe" +) + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go new file mode 100644 index 00000000000..4c27273c7ab --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_solaris.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package readline + +import "golang.org/x/sys/unix" + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} + +type Termios unix.Termios + +func getTermios(fd int) (*Termios, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + return (*Termios)(termios), nil +} + +func setTermios(fd int, termios *Termios) error { + return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) +} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go new file mode 100644 index 00000000000..d3ea242448d --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_unix.go @@ -0,0 +1,24 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +type Termios syscall.Termios + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + var dimensions [4]uint16 + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) + if err != 0 { + return 0, 0, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go new file mode 100644 index 00000000000..1290e00bc14 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_windows.go @@ -0,0 +1,171 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. 
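+// For example, IsTerminal(GetStdin()) (GetStdin is defined in utils_windows.go)
+// reports whether stdin is attached to a console.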
+func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) 
+ if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go new file mode 100644 index 00000000000..1078631c14a --- /dev/null +++ b/vendor/github.com/chzyer/readline/terminal.go @@ -0,0 +1,238 @@ +package readline + +import ( + "bufio" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +type Terminal struct { + m sync.Mutex + cfg *Config + outchan chan rune + closed int32 + stopChan chan struct{} + kickChan chan struct{} + wg sync.WaitGroup + isReading int32 + sleeping int32 + + sizeChan chan string +} + +func NewTerminal(cfg *Config) (*Terminal, error) { + if err := cfg.Init(); err != nil { + return nil, err + } + t := &Terminal{ + cfg: cfg, + kickChan: make(chan struct{}, 1), + outchan: make(chan rune), + stopChan: make(chan struct{}, 1), + sizeChan: make(chan string, 1), + } + + go t.ioloop() + return t, nil +} + +// SleepToResume will sleep myself, and return only if I'm resumed. +func (t *Terminal) SleepToResume() { + if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { + return + } + defer atomic.StoreInt32(&t.sleeping, 0) + + t.ExitRawMode() + ch := WaitForResume() + SuspendMe() + <-ch + t.EnterRawMode() +} + +func (t *Terminal) EnterRawMode() (err error) { + return t.cfg.FuncMakeRaw() +} + +func (t *Terminal) ExitRawMode() (err error) { + return t.cfg.FuncExitRaw() +} + +func (t *Terminal) Write(b []byte) (int, error) { + return t.cfg.Stdout.Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +func (t *Terminal) WriteStdin(b []byte) (int, error) { + return t.cfg.StdinWriter.Write(b) +} + +type termSize struct { + left int + top int +} + +func (t *Terminal) GetOffset(f func(offset string)) { + go func() { + f(<-t.sizeChan) + }() + t.Write([]byte("\033[6n")) +} + +func (t *Terminal) Print(s string) { + fmt.Fprintf(t.cfg.Stdout, "%s", s) +} + +func (t *Terminal) PrintRune(r rune) { + fmt.Fprintf(t.cfg.Stdout, "%c", r) +} + +func (t *Terminal) Readline() *Operation { + return NewOperation(t, t.cfg) +} + +// return rune(0) if meet EOF +func (t *Terminal) ReadRune() rune { + ch, ok := <-t.outchan + if !ok { + return rune(0) + } + return ch +} + +func (t *Terminal) IsReading() bool { + return atomic.LoadInt32(&t.isReading) == 1 +} + +func (t *Terminal) KickRead() { + select { + case t.kickChan <- struct{}{}: + default: + } +} + +func (t *Terminal) ioloop() { + t.wg.Add(1) + defer func() { + t.wg.Done() + close(t.outchan) + }() + + var ( + isEscape bool + isEscapeEx bool + expectNextChar bool + ) + + buf := bufio.NewReader(t.getStdin()) + for { + if !expectNextChar { + atomic.StoreInt32(&t.isReading, 0) + select { + case <-t.kickChan: + atomic.StoreInt32(&t.isReading, 1) + case <-t.stopChan: + return + } + } + expectNextChar = false + r, _, err := buf.ReadRune() + if err != nil { + if strings.Contains(err.Error(), "interrupted system call") { + expectNextChar = true + continue + } + break + } + + if isEscape { + isEscape = false + if r == CharEscapeEx { + expectNextChar = true + isEscapeEx = true + continue + } + r = escapeKey(r, buf) + } else if isEscapeEx { + isEscapeEx = false + if key := readEscKey(r, buf); key != nil { + r = escapeExKey(key) + // offset + if key.typ == 'R' { + if _, _, ok := key.Get2(); ok { + select { + case t.sizeChan <- key.attr: + default: + } + } + expectNextChar = true + continue + } + } + if r == 0 { + expectNextChar = true + continue + } + } + + 
expectNextChar = true
+		switch r {
+		case CharEsc:
+			if t.cfg.VimMode {
+				t.outchan <- r
+				break
+			}
+			isEscape = true
+		case CharInterrupt, CharEnter, CharCtrlJ, CharDelete:
+			expectNextChar = false
+			fallthrough
+		default:
+			t.outchan <- r
+		}
+	}
+
+}
+
+func (t *Terminal) Bell() {
+	fmt.Fprintf(t, "%c", CharBell)
+}
+
+func (t *Terminal) Close() error {
+	if atomic.SwapInt32(&t.closed, 1) != 0 {
+		return nil
+	}
+	if closer, ok := t.cfg.Stdin.(io.Closer); ok {
+		closer.Close()
+	}
+	close(t.stopChan)
+	t.wg.Wait()
+	return t.ExitRawMode()
+}
+
+func (t *Terminal) GetConfig() *Config {
+	t.m.Lock()
+	cfg := *t.cfg
+	t.m.Unlock()
+	return &cfg
+}
+
+func (t *Terminal) getStdin() io.Reader {
+	t.m.Lock()
+	r := t.cfg.Stdin
+	t.m.Unlock()
+	return r
+}
+
+func (t *Terminal) SetConfig(c *Config) error {
+	if err := c.Init(); err != nil {
+		return err
+	}
+	t.m.Lock()
+	t.cfg = c
+	t.m.Unlock()
+	return nil
+}
diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go
new file mode 100644
index 00000000000..af4e005216f
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils.go
@@ -0,0 +1,277 @@
+package readline
+
+import (
+	"bufio"
+	"bytes"
+	"container/list"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode"
+)
+
+var (
+	isWindows = false
+)
+
+const (
+	CharLineStart = 1
+	CharBackward  = 2
+	CharInterrupt = 3
+	CharDelete    = 4
+	CharLineEnd   = 5
+	CharForward   = 6
+	CharBell      = 7
+	CharCtrlH     = 8
+	CharTab       = 9
+	CharCtrlJ     = 10
+	CharKill      = 11
+	CharCtrlL     = 12
+	CharEnter     = 13
+	CharNext      = 14
+	CharPrev      = 16
+	CharBckSearch = 18
+	CharFwdSearch = 19
+	CharTranspose = 20
+	CharCtrlU     = 21
+	CharCtrlW     = 23
+	CharCtrlY     = 25
+	CharCtrlZ     = 26
+	CharEsc       = 27
+	CharEscapeEx  = 91
+	CharBackspace = 127
+)
+
+const (
+	MetaBackward rune = -iota - 1
+	MetaForward
+	MetaDelete
+	MetaBackspace
+	MetaTranspose
+)
+
+// WaitForResume must be called before the current process gets suspended.
+// It runs a ticker and returns a channel that is signaled once a long gap
+// between ticks occurs, which means this process has been resumed.
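+//
+// The intended pairing (see Terminal.SleepToResume in terminal.go) is roughly:
+//
+//	ch := WaitForResume()
+//	SuspendMe()
+//	<-ch // the ticker observed a long gap between ticks: we were resumed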
+func WaitForResume() chan struct{} { + ch := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + t := time.Now() + wg.Done() + for { + now := <-ticker.C + if now.Sub(t) > 100*time.Millisecond { + break + } + t = now + } + ticker.Stop() + ch <- struct{}{} + }() + wg.Wait() + return ch +} + +func Restore(fd int, state *State) error { + err := restoreTerm(fd, state) + if err != nil { + // errno 0 means everything is ok :) + if err.Error() == "errno 0" { + return nil + } else { + return err + } + } + return nil +} + +func IsPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// translate Esc[X +func escapeExKey(key *escapeKeyPair) rune { + var r rune + switch key.typ { + case 'D': + r = CharBackward + case 'C': + r = CharForward + case 'A': + r = CharPrev + case 'B': + r = CharNext + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + case '~': + if key.attr == "3" { + r = CharDelete + } + default: + } + return r +} + +type escapeKeyPair struct { + attr string + typ rune +} + +func (e *escapeKeyPair) Get2() (int, int, bool) { + sp := strings.Split(e.attr, ";") + if len(sp) < 2 { + return -1, -1, false + } + s1, err := strconv.Atoi(sp[0]) + if err != nil { + return -1, -1, false + } + s2, err := strconv.Atoi(sp[1]) + if err != nil { + return -1, -1, false + } + return s1, s2, true +} + +func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { + p := escapeKeyPair{} + buf := bytes.NewBuffer(nil) + for { + if r == ';' { + } else if unicode.IsNumber(r) { + } else { + p.typ = r + break + } + buf.WriteRune(r) + r, _, _ = reader.ReadRune() + } + p.attr = buf.String() + return &p +} + +// translate EscX to Meta+X +func escapeKey(r rune, reader *bufio.Reader) rune { + switch r { + case 'b': + r = MetaBackward + case 'f': + r = MetaForward + case 'd': + r = MetaDelete + case CharTranspose: + r = MetaTranspose + case CharBackspace: + r = MetaBackspace + case 'O': + d, _, _ := reader.ReadRune() + switch d { + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + default: + reader.UnreadRune() + } + case CharEsc: + + } + return r +} + +func SplitByLine(start, screenWidth int, rs []rune) []string { + var ret []string + buf := bytes.NewBuffer(nil) + currentWidth := start + for _, r := range rs { + w := runes.Width(r) + currentWidth += w + buf.WriteRune(r) + if currentWidth >= screenWidth { + ret = append(ret, buf.String()) + buf.Reset() + currentWidth = 0 + } + } + ret = append(ret, buf.String()) + return ret +} + +// calculate how many lines for N character +func LineCount(screenWidth, w int) int { + r := w / screenWidth + if w%screenWidth != 0 { + r++ + } + return r +} + +func IsWordBreak(i rune) bool { + switch { + case i >= 'a' && i <= 'z': + case i >= 'A' && i <= 'Z': + case i >= '0' && i <= '9': + default: + return true + } + return false +} + +func GetInt(s []string, def int) int { + if len(s) == 0 { + return def + } + c, err := strconv.Atoi(s[0]) + if err != nil { + return def + } + return c +} + +type RawMode struct { + state *State +} + +func (r *RawMode) Enter() (err error) { + r.state, err = MakeRaw(GetStdin()) + return err +} + +func (r *RawMode) Exit() error { + if r.state == nil { + return nil + } + return Restore(GetStdin(), r.state) +} + +// ----------------------------------------------------------------------------- + +func sleep(n int) { + Debug(n) + time.Sleep(2000 * time.Millisecond) +} + +// print a linked list to 
Debug()
+func debugList(l *list.List) {
+	idx := 0
+	for e := l.Front(); e != nil; e = e.Next() {
+		Debug(idx, fmt.Sprintf("%+v", e.Value))
+		idx++
+	}
+}
+
+// Debug appends its arguments as a log line to the file debug.tmp
+// in the current working directory
+func Debug(o ...interface{}) {
+	f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+	fmt.Fprintln(f, o...)
+	f.Close()
+}
diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go
new file mode 100644
index 00000000000..f88dac97bd7
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils_unix.go
@@ -0,0 +1,83 @@
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+
+package readline
+
+import (
+	"io"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+)
+
+type winsize struct {
+	Row    uint16
+	Col    uint16
+	Xpixel uint16
+	Ypixel uint16
+}
+
+// SuspendMe is used to send a suspend signal (SIGTSTP) to ourselves while in raw mode.
+// On OSX it needs to be sent to the parent's pid;
+// on Linux it needs to be sent to our own pid.
+func SuspendMe() {
+	p, _ := os.FindProcess(os.Getppid())
+	p.Signal(syscall.SIGTSTP)
+	p, _ = os.FindProcess(os.Getpid())
+	p.Signal(syscall.SIGTSTP)
+}
+
+// get width of the terminal
+func getWidth(stdoutFd int) int {
+	cols, _, err := GetSize(stdoutFd)
+	if err != nil {
+		return -1
+	}
+	return cols
+}
+
+func GetScreenWidth() int {
+	w := getWidth(syscall.Stdout)
+	if w < 0 {
+		w = getWidth(syscall.Stderr)
+	}
+	return w
+}
+
+// ClearScreen clears the console screen
+func ClearScreen(w io.Writer) (int, error) {
+	return w.Write([]byte("\033[H"))
+}
+
+func DefaultIsTerminal() bool {
+	return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr))
+}
+
+func GetStdin() int {
+	return syscall.Stdin
+}
+
+// -----------------------------------------------------------------------------
+
+var (
+	widthChange         sync.Once
+	widthChangeCallback func()
+)
+
+func DefaultOnWidthChanged(f func()) {
+	widthChangeCallback = f
+	widthChange.Do(func() {
+		ch := make(chan os.Signal, 1)
+		signal.Notify(ch, syscall.SIGWINCH)
+
+		go func() {
+			for {
+				_, ok := <-ch
+				if !ok {
+					break
+				}
+				widthChangeCallback()
+			}
+		}()
+	})
+}
diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go
new file mode 100644
index 00000000000..5bfa55dcce8
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils_windows.go
@@ -0,0 +1,41 @@
+// +build windows
+
+package readline
+
+import (
+	"io"
+	"syscall"
+)
+
+func SuspendMe() {
+}
+
+func GetStdin() int {
+	return int(syscall.Stdin)
+}
+
+func init() {
+	isWindows = true
+}
+
+// get width of the terminal
+func GetScreenWidth() int {
+	info, _ := GetConsoleScreenBufferInfo()
+	if info == nil {
+		return -1
+	}
+	return int(info.dwSize.x)
+}
+
+// ClearScreen clears the console screen
+func ClearScreen(_ io.Writer) error {
+	return SetConsoleCursorPosition(&_COORD{0, 0})
+}
+
+func DefaultIsTerminal() bool {
+	return true
+}
+
+func DefaultOnWidthChanged(func()) {
+
+}
diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go
new file mode 100644
index 00000000000..bedf2c1a693
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/vim.go
@@ -0,0 +1,176 @@
+package readline
+
+const (
+	VIM_NORMAL = iota
+	VIM_INSERT
+	VIM_VISUAL
+)
+
+type opVim struct {
+	cfg     *Config
+	op      *Operation
+	vimMode int
+}
+
+func newVimMode(op *Operation) *opVim {
+	ov := &opVim{
+		cfg: op.cfg,
+		op:  op,
+	}
+	ov.SetVimMode(ov.cfg.VimMode)
+	return ov
+}
+
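+// A hedged sketch of enabling vim bindings through the public API (opVim is
+// internal; Config.VimMode and Instance.SetVimMode are the entry points):
+//
+//	rl, _ := readline.NewEx(&readline.Config{Prompt: "> ", VimMode: true})
+//	defer rl.Close()
+//	rl.SetVimMode(true) // can also be toggled at runtime
+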
+func (o *opVim) SetVimMode(on bool) { + if o.cfg.VimMode && !on { // turn off + o.ExitVimMode() + } + o.cfg.VimMode = on + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) IsEnableVimMode() bool { + return o.cfg.VimMode +} + +func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'h': + t = CharBackward + case 'j': + t = CharNext + case 'k': + t = CharPrev + case 'l': + t = CharForward + case '0', '^': + rb.MoveToLineStart() + case '$': + rb.MoveToLineEnd() + case 'x': + rb.Delete() + if rb.IsCursorInEnd() { + rb.MoveBackward() + } + case 'r': + rb.Replace(readNext()) + case 'd': + next := readNext() + switch next { + case 'd': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + case 'p': + rb.Yank() + case 'b', 'B': + rb.MoveToPrevWord() + case 'w', 'W': + rb.MoveToNextWord() + case 'e', 'E': + rb.MoveToEndWord() + case 'f', 'F', 't', 'T': + next := readNext() + prevChar := r == 't' || r == 'T' + reverse := r == 'F' || r == 'T' + switch next { + case CharEsc: + default: + rb.MoveTo(next, prevChar, reverse) + } + default: + return r, false + } + return t, true +} + +func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'i': + case 'I': + rb.MoveToLineStart() + case 'a': + rb.MoveForward() + case 'A': + rb.MoveToLineEnd() + case 's': + rb.Delete() + case 'S': + rb.Erase() + case 'c': + next := readNext() + switch next { + case 'c': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + default: + return r, false + } + + o.EnterVimInsertMode() + return +} + +func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { + switch r { + case CharEnter, CharInterrupt: + o.ExitVimMode() + return r + } + + if r, handled := o.handleVimNormalMovement(r, readNext); handled { + return r + } + + if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { + return r + } + + // invalid operation + o.op.t.Bell() + return 0 +} + +func (o *opVim) EnterVimInsertMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimInsertMode() { + o.vimMode = VIM_NORMAL +} + +func (o *opVim) HandleVim(r rune, readNext func() rune) rune { + if o.vimMode == VIM_NORMAL { + return o.HandleVimNormal(r, readNext) + } + if r == CharEsc { + o.ExitVimInsertMode() + return 0 + } + + switch o.vimMode { + case VIM_INSERT: + return r + case VIM_VISUAL: + } + return r +} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go new file mode 100644 index 00000000000..63f4f7b78fc --- /dev/null +++ b/vendor/github.com/chzyer/readline/windows_api.go @@ -0,0 +1,152 @@ +// +build windows + +package readline + +import ( + "reflect" + "syscall" + "unsafe" +) + +var ( + kernel = NewKernel() + stdout = uintptr(syscall.Stdout) + stdin = uintptr(syscall.Stdin) +) + +type Kernel struct { + SetConsoleCursorPosition, + SetConsoleTextAttribute, + FillConsoleOutputCharacterW, + FillConsoleOutputAttribute, + ReadConsoleInputW, + GetConsoleScreenBufferInfo, + GetConsoleCursorInfo, + GetStdHandle CallFunc +} + +type short int16 +type word uint16 +type dword uint32 +type wchar uint16 + +type _COORD struct { + x short + y short +} + +func (c *_COORD) ptr() uintptr { + return uintptr(*(*int32)(unsafe.Pointer(c))) +} + 
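+// Editor's note (illustrative, not upstream code): ptr relies on _COORD being
+// exactly two int16 fields, matching the Win32 COORD struct that
+// SetConsoleCursorPosition receives by value in a single 32-bit argument.
+// Reinterpreting the struct as int32 packs x into the low 16 bits and y into
+// the high 16 bits on Windows' little-endian targets. For the non-negative
+// coordinates used here, an equivalent explicit form would be:
+//
+//	func (c *_COORD) ptr() uintptr {
+//		return uintptr(uint32(uint16(c.y))<<16 | uint32(uint16(c.x)))
+//	}
+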
+const ( + EVENT_KEY = 0x0001 + EVENT_MOUSE = 0x0002 + EVENT_WINDOW_BUFFER_SIZE = 0x0004 + EVENT_MENU = 0x0008 + EVENT_FOCUS = 0x0010 +) + +type _KEY_EVENT_RECORD struct { + bKeyDown int32 + wRepeatCount word + wVirtualKeyCode word + wVirtualScanCode word + unicodeChar wchar + dwControlKeyState dword +} + +// KEY_EVENT_RECORD KeyEvent; +// MOUSE_EVENT_RECORD MouseEvent; +// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; +// MENU_EVENT_RECORD MenuEvent; +// FOCUS_EVENT_RECORD FocusEvent; +type _INPUT_RECORD struct { + EventType word + Padding uint16 + Event [16]byte +} + +type _CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize _COORD + dwCursorPosition _COORD + wAttributes word + srWindow _SMALL_RECT + dwMaximumWindowSize _COORD +} + +type _SMALL_RECT struct { + left short + top short + right short + bottom short +} + +type _CONSOLE_CURSOR_INFO struct { + dwSize dword + bVisible bool +} + +type CallFunc func(u ...uintptr) error + +func NewKernel() *Kernel { + k := &Kernel{} + kernel32 := syscall.NewLazyDLL("kernel32.dll") + v := reflect.ValueOf(k).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + f := kernel32.NewProc(name) + v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) + } + return k +} + +func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { + return func(args ...uintptr) error { + var r0 uintptr + var e1 syscall.Errno + size := uintptr(len(args)) + if len(args) <= 3 { + buf := make([]uintptr, 3) + copy(buf, args) + r0, _, e1 = syscall.Syscall(p.Addr(), size, + buf[0], buf[1], buf[2]) + } else { + buf := make([]uintptr, 6) + copy(buf, args) + r0, _, e1 = syscall.Syscall6(p.Addr(), size, + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], + ) + } + + if int(r0) == 0 { + if e1 != 0 { + return error(e1) + } else { + return syscall.EINVAL + } + } + return nil + } + +} + +func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { + t := new(_CONSOLE_SCREEN_BUFFER_INFO) + err := kernel.GetConsoleScreenBufferInfo( + stdout, + uintptr(unsafe.Pointer(t)), + ) + return t, err +} + +func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { + t := new(_CONSOLE_CURSOR_INFO) + err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) + return t, err +} + +func SetConsoleCursorPosition(c *_COORD) error { + return kernel.SetConsoleCursorPosition(stdout, c.ptr()) +} diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/vendor/github.com/containerd/cgroups/stats/v1/doc.go new file mode 100644 index 00000000000..23f3cdd4b37 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1 diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go new file mode 100644 index 00000000000..6d2d41770b9 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go @@ -0,0 +1,6125 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/cgroups/stats/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metrics struct { + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metrics) Reset() { *m = Metrics{} } +func (*Metrics) ProtoMessage() {} +func (*Metrics) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{0} +} +func (m *Metrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metrics.Merge(m, src) +} +func (m *Metrics) XXX_Size() int { + return m.Size() +} +func (m *Metrics) XXX_DiscardUnknown() { + xxx_messageInfo_Metrics.DiscardUnknown(m) +} + +var xxx_messageInfo_Metrics proto.InternalMessageInfo + +type HugetlbStat 
struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } +func (*HugetlbStat) ProtoMessage() {} +func (*HugetlbStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{1} +} +func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HugetlbStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_HugetlbStat.Merge(m, src) +} +func (m *HugetlbStat) XXX_Size() int { + return m.Size() +} +func (m *HugetlbStat) XXX_DiscardUnknown() { + xxx_messageInfo_HugetlbStat.DiscardUnknown(m) +} + +var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo + +type PidsStat struct { + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PidsStat) Reset() { *m = PidsStat{} } +func (*PidsStat) ProtoMessage() {} +func (*PidsStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{2} +} +func (m *PidsStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PidsStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_PidsStat.Merge(m, src) +} +func (m *PidsStat) XXX_Size() int { + return m.Size() +} +func (m *PidsStat) XXX_DiscardUnknown() { + xxx_messageInfo_PidsStat.DiscardUnknown(m) +} + +var xxx_messageInfo_PidsStat proto.InternalMessageInfo + +type CPUStat struct { + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUStat) Reset() { *m = CPUStat{} } +func (*CPUStat) ProtoMessage() {} +func (*CPUStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{3} +} +func (m *CPUStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUStat.Merge(m, src) +} +func (m *CPUStat) XXX_Size() int 
{ + return m.Size() +} +func (m *CPUStat) XXX_DiscardUnknown() { + xxx_messageInfo_CPUStat.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUStat proto.InternalMessageInfo + +type CPUUsage struct { + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{4} +} +func (m *CPUUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUUsage.Merge(m, src) +} +func (m *CPUUsage) XXX_Size() int { + return m.Size() +} +func (m *CPUUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CPUUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUUsage proto.InternalMessageInfo + +type Throttle struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Throttle) Reset() { *m = Throttle{} } +func (*Throttle) ProtoMessage() {} +func (*Throttle) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{5} +} +func (m *Throttle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Throttle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Throttle.Merge(m, src) +} +func (m *Throttle) XXX_Size() int { + return m.Size() +} +func (m *Throttle) XXX_DiscardUnknown() { + xxx_messageInfo_Throttle.DiscardUnknown(m) +} + +var xxx_messageInfo_Throttle proto.InternalMessageInfo + +type MemoryStat struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 
`protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` + 
KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryStat) Reset() { *m = MemoryStat{} } +func (*MemoryStat) ProtoMessage() {} +func (*MemoryStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{6} +} +func (m *MemoryStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryStat.Merge(m, src) +} +func (m *MemoryStat) XXX_Size() int { + return m.Size() +} +func (m *MemoryStat) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryStat.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryStat proto.InternalMessageInfo + +type MemoryEntry struct { + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } +func (*MemoryEntry) ProtoMessage() {} +func (*MemoryEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{7} +} +func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryEntry.Merge(m, src) +} +func (m *MemoryEntry) XXX_Size() int { + return m.Size() +} +func (m *MemoryEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo + +type MemoryOomControl struct { + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } +func (*MemoryOomControl) ProtoMessage() {} +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{8} +} +func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryOomControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryOomControl.Merge(m, src) +} +func (m *MemoryOomControl) XXX_Size() int { + return m.Size() +} +func (m *MemoryOomControl) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo + +type BlkIOStat struct { + IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } +func (*BlkIOStat) ProtoMessage() {} +func (*BlkIOStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{9} +} +func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOStat.Merge(m, src) +} +func (m *BlkIOStat) XXX_Size() int { + return m.Size() +} +func (m *BlkIOStat) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOStat.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo + +type BlkIOEntry struct { + Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } +func (*BlkIOEntry) ProtoMessage() {} +func (*BlkIOEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{10} +} +func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOEntry) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOEntry.Merge(m, src) +} +func (m *BlkIOEntry) XXX_Size() int { + return m.Size() +} +func (m *BlkIOEntry) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo + +type RdmaStat struct { + Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` + Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaStat) Reset() { *m = RdmaStat{} } +func (*RdmaStat) ProtoMessage() {} +func (*RdmaStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{11} +} +func (m *RdmaStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaStat.Merge(m, src) +} +func (m *RdmaStat) XXX_Size() int { + return m.Size() +} +func (m *RdmaStat) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaStat.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaStat proto.InternalMessageInfo + +type RdmaEntry struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` + HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } +func (*RdmaEntry) ProtoMessage() {} +func (*RdmaEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{12} +} +func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaEntry.Merge(m, src) +} +func (m *RdmaEntry) XXX_Size() int { + return m.Size() +} +func (m *RdmaEntry) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo + +type NetworkStat struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + 
RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkStat) Reset() { *m = NetworkStat{} } +func (*NetworkStat) ProtoMessage() {} +func (*NetworkStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{13} +} +func (m *NetworkStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NetworkStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkStat.Merge(m, src) +} +func (m *NetworkStat) XXX_Size() int { + return m.Size() +} +func (m *NetworkStat) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkStat.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkStat proto.InternalMessageInfo + +// CgroupStats exports per-cgroup statistics. +type CgroupStats struct { + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{14} +} +func (m *CgroupStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CgroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_CgroupStats.Merge(m, src) +} +func (m *CgroupStats) XXX_Size() int { + return m.Size() +} +func (m *CgroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_CgroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_CgroupStats proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") + 
proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") + proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") + proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") + proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") + proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") + proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") + proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") + proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") + proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") + proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") + proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") + proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") + proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") + proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") +} + +func init() { + proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) +} + +var fileDescriptor_a17b2d87c332bfaa = []byte{ + // 1749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, + 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, + 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, + 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, + 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, + 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, + 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, + 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, + 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, + 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, + 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, + 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, + 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, + 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, + 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, + 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, + 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61, + 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, + 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, + 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, + 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, + 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, + 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 
0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, + 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, + 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, + 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, + 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, + 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, + 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, + 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, + 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, + 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0, + 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, + 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, + 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, + 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, + 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, + 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, + 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15, + 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, + 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, + 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, + 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, + 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, + 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, + 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, + 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, + 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, + 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, + 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, + 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, + 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, + 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, + 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, + 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, + 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, + 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, + 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, + 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 
0xee, 0xeb, 0x4a, 0xee, + 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, + 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, + 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, + 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, + 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, + 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, + 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, + 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, + 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, + 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, + 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, + 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, + 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, + 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, + 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, + 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, + 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, + 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, + 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, + 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, + 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, + 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, + 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, + 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, + 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, + 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, + 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, + 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, + 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, + 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, + 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, + 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, + 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, + 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, + 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, + 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, + 
0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, + 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, + 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, + 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, + 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, + 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, + 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, + 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, + 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, + 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, + 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, + 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, + 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, + 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, + 0x22, 0xc4, 0x11, 0x00, 0x00, +} + +func (m *Metrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemoryOomControl != nil { + { + size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CgroupStats != nil { + { + size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Network) > 0 { + for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Rdma != nil { + { + size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Blkio != nil { + { + size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if 
m.Pids != nil { + { + size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Hugetlb) > 0 { + for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Pagesize) > 0 { + i -= len(m.Pagesize) + copy(dAtA[i:], m.Pagesize) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) + i-- + dAtA[i] = 0x22 + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x18 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PidsStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if m.Current != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CPUStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Throttling != nil { + { + size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CPUUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PerCPU) > 0 { + dAtA11 := make([]byte, len(m.PerCPU)*10) + var j10 int + for _, num := range m.PerCPU { + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x22 + } + if m.User != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + i-- + dAtA[i] = 0x18 + } + if m.Kernel != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) + i-- + dAtA[i] = 0x10 + } + if m.Total != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Throttle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ThrottledTime != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) + i-- + dAtA[i] = 0x18 + } + if m.ThrottledPeriods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + i-- + dAtA[i] = 0x10 + } + if m.Periods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.KernelTCP != nil { + { + size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.Kernel != nil { + { + size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
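+ /* MemoryStat has more than 15 fields, so from field 16 on the tag
+ varint (field<<3 | wire type) no longer fits in one byte; for
+ example field 33 with wire type 2 encodes as 0x8a 0x02. Because
+ the buffer is filled backward, the second byte is stored first. */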
0x2 + i-- + dAtA[i] = 0x8a + } + if m.TotalUnevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.TotalActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.TotalInactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.TotalActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if m.TotalInactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.TotalPgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.TotalPgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.TotalPgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.TotalPgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.TotalWriteback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.TotalDirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.TotalMappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.TotalRSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.TotalRSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.TotalCache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.HierarchicalSwapLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.HierarchicalMemoryLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.Unevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + i-- + dAtA[i] = 0x78 + } + if m.ActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + i-- + dAtA[i] = 0x70 + } + if m.InactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) + i-- + dAtA[i] = 0x68 + } + if m.ActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) + i-- + dAtA[i] = 0x60 + } + if m.InactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) + i-- + dAtA[i] = 0x58 + } + if m.PgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) + i-- + dAtA[i] = 0x50 + } + if m.PgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) + i-- + dAtA[i] = 0x48 + } + if m.PgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) + i-- + dAtA[i] = 0x40 + } + if m.PgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) + i-- + dAtA[i] = 0x38 + } + if m.Writeback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) + i-- + dAtA[i] = 0x30 + } + if m.Dirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) 
+ i-- + dAtA[i] = 0x28 + } + if m.MappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + i-- + dAtA[i] = 0x20 + } + if m.RSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + i-- + dAtA[i] = 0x18 + } + if m.RSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) + i-- + dAtA[i] = 0x10 + } + if m.Cache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x20 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x18 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x10 + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OomKill != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) + i-- + dAtA[i] = 0x18 + } + if m.UnderOom != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) + i-- + dAtA[i] = 0x10 + } + if m.OomKillDisable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SectorsRecursive) > 0 { + for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.IoTimeRecursive) > 0 { + for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.IoMergedRecursive) > 0 { + for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.IoQueuedRecursive) > 0 { + for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.IoServicedRecursive) > 0 { + for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.IoServiceBytesRecursive) > 0 { + for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x28 + } + if m.Minor != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) + i-- + dAtA[i] = 0x20 + } + if m.Major != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + i-- + dAtA[i] = 0x18 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x12 + } + if len(m.Op) > 0 { + i -= len(m.Op) + copy(dAtA[i:], m.Op) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RdmaStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Limit) > 0 { + for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Current) > 0 { + for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.HcaObjects != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) + i-- + dAtA[i] = 0x18 + } + if m.HcaHandles != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) + i-- + dAtA[i] = 0x10 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + i-- + dAtA[i] = 0x48 + } + if m.TxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + i-- + dAtA[i] = 0x40 + } + if m.TxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + i-- + dAtA[i] = 0x38 + } + if m.TxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + i-- + dAtA[i] = 0x30 + } + if m.RxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + i-- + dAtA[i] = 0x28 + } + if m.RxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + i-- + dAtA[i] = 0x20 + } + if m.RxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + i-- + dAtA[i] = 0x18 + } + if m.RxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CgroupStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NrIoWait != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) + i-- + dAtA[i] = 0x28 + } + if m.NrUninterruptible != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) + i-- + dAtA[i] = 0x20 + } + if m.NrStopped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) + i-- + dAtA[i] = 0x18 + } + if m.NrRunning != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) + i-- + dAtA[i] = 0x10 + } + if m.NrSleeping != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Metrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hugetlb) > 0 { + for _, e := range m.Hugetlb { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Pids != nil { + l = m.Pids.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Blkio != nil { + l = m.Blkio.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Rdma != nil { + l = m.Rdma.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.Network) > 0 { + for _, e := range m.Network { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.CgroupStats != nil { + l = m.CgroupStats.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.MemoryOomControl != nil { + l = m.MemoryOomControl.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HugetlbStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + l = len(m.Pagesize) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PidsStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Current != 0 { + n += 1 + sovMetrics(uint64(m.Current)) + } + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != nil { + l = m.Usage.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Throttling != nil { + l = m.Throttling.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovMetrics(uint64(m.Total)) + } + if m.Kernel != 0 { + n += 1 + 
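+ /* The Size methods mirror the marshalers field by field: every
+ non-zero field adds its tag width (1 byte for fields 1-15, 2 bytes
+ from field 16 on) plus the varint length reported by sovMetrics,
+ which is what lets Marshal size the output buffer up front. */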
sovMetrics(uint64(m.Kernel)) + } + if m.User != 0 { + n += 1 + sovMetrics(uint64(m.User)) + } + if len(m.PerCPU) > 0 { + l = 0 + for _, e := range m.PerCPU { + l += sovMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Throttle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Periods != 0 { + n += 1 + sovMetrics(uint64(m.Periods)) + } + if m.ThrottledPeriods != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) + } + if m.ThrottledTime != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledTime)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cache != 0 { + n += 1 + sovMetrics(uint64(m.Cache)) + } + if m.RSS != 0 { + n += 1 + sovMetrics(uint64(m.RSS)) + } + if m.RSSHuge != 0 { + n += 1 + sovMetrics(uint64(m.RSSHuge)) + } + if m.MappedFile != 0 { + n += 1 + sovMetrics(uint64(m.MappedFile)) + } + if m.Dirty != 0 { + n += 1 + sovMetrics(uint64(m.Dirty)) + } + if m.Writeback != 0 { + n += 1 + sovMetrics(uint64(m.Writeback)) + } + if m.PgPgIn != 0 { + n += 1 + sovMetrics(uint64(m.PgPgIn)) + } + if m.PgPgOut != 0 { + n += 1 + sovMetrics(uint64(m.PgPgOut)) + } + if m.PgFault != 0 { + n += 1 + sovMetrics(uint64(m.PgFault)) + } + if m.PgMajFault != 0 { + n += 1 + sovMetrics(uint64(m.PgMajFault)) + } + if m.InactiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.InactiveAnon)) + } + if m.ActiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.ActiveAnon)) + } + if m.InactiveFile != 0 { + n += 1 + sovMetrics(uint64(m.InactiveFile)) + } + if m.ActiveFile != 0 { + n += 1 + sovMetrics(uint64(m.ActiveFile)) + } + if m.Unevictable != 0 { + n += 1 + sovMetrics(uint64(m.Unevictable)) + } + if m.HierarchicalMemoryLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) + } + if m.HierarchicalSwapLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) + } + if m.TotalCache != 0 { + n += 2 + sovMetrics(uint64(m.TotalCache)) + } + if m.TotalRSS != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSS)) + } + if m.TotalRSSHuge != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) + } + if m.TotalMappedFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalMappedFile)) + } + if m.TotalDirty != 0 { + n += 2 + sovMetrics(uint64(m.TotalDirty)) + } + if m.TotalWriteback != 0 { + n += 2 + sovMetrics(uint64(m.TotalWriteback)) + } + if m.TotalPgPgIn != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) + } + if m.TotalPgPgOut != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) + } + if m.TotalPgFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgFault)) + } + if m.TotalPgMajFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) + } + if m.TotalInactiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) + } + if m.TotalActiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) + } + if m.TotalInactiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) + } + if m.TotalActiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveFile)) + } + if m.TotalUnevictable != 0 { + n += 2 + sovMetrics(uint64(m.TotalUnevictable)) + } + if m.Usage != nil { + l = m.Usage.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Swap != nil { + l = m.Swap.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Kernel != nil { + l = m.Kernel.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.KernelTCP != nil { + l = m.KernelTCP.Size() + n += 2 + l 
+ sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryOomControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OomKillDisable != 0 { + n += 1 + sovMetrics(uint64(m.OomKillDisable)) + } + if m.UnderOom != 0 { + n += 1 + sovMetrics(uint64(m.UnderOom)) + } + if m.OomKill != 0 { + n += 1 + sovMetrics(uint64(m.OomKill)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.IoServiceBytesRecursive) > 0 { + for _, e := range m.IoServiceBytesRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServicedRecursive) > 0 { + for _, e := range m.IoServicedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoQueuedRecursive) > 0 { + for _, e := range m.IoQueuedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for _, e := range m.IoServiceTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for _, e := range m.IoWaitTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoMergedRecursive) > 0 { + for _, e := range m.IoMergedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoTimeRecursive) > 0 { + for _, e := range m.IoTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.SectorsRecursive) > 0 { + for _, e := range m.SectorsRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Op) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Major != 0 { + n += 1 + sovMetrics(uint64(m.Major)) + } + if m.Minor != 0 { + n += 1 + sovMetrics(uint64(m.Minor)) + } + if m.Value != 0 { + n += 1 + sovMetrics(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Current) > 0 { + for _, e := range m.Current { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Limit) > 0 { + for _, e := range m.Limit { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.HcaHandles != 0 { + n += 1 + sovMetrics(uint64(m.HcaHandles)) + } + if m.HcaObjects != 0 { + n += 1 + sovMetrics(uint64(m.HcaObjects)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*NetworkStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.RxBytes != 0 { + n += 1 + sovMetrics(uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + n += 1 + sovMetrics(uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + n += 1 + sovMetrics(uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + n += 1 + sovMetrics(uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + n += 1 + sovMetrics(uint64(m.TxBytes)) + } + if m.TxPackets != 0 { + n += 1 + sovMetrics(uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + n += 1 + sovMetrics(uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + n += 1 + sovMetrics(uint64(m.TxDropped)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CgroupStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NrSleeping != 0 { + n += 1 + sovMetrics(uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + n += 1 + sovMetrics(uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + n += 1 + sovMetrics(uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + n += 1 + sovMetrics(uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + n += 1 + sovMetrics(uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForHugetlb := "[]*HugetlbStat{" + for _, f := range this.Hugetlb { + repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," + } + repeatedStringForHugetlb += "}" + repeatedStringForNetwork := "[]*NetworkStat{" + for _, f := range this.Network { + repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," + } + repeatedStringForNetwork += "}" + s := strings.Join([]string{`&Metrics{`, + `Hugetlb:` + repeatedStringForHugetlb + `,`, + `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, + `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, + `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, + `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + repeatedStringForNetwork + `,`, + `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, + `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *HugetlbStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HugetlbStat{`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *PidsStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsStat{`, + `Current:` + fmt.Sprintf("%v", this.Current) + `,`, + `Limit:` + 
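+ /* sovMetrics computes a varint's width as (math_bits.Len64(x|1)+6)/7,
+ one byte per started 7-bit group, with x|1 guarding the zero case;
+ sozMetrics applies it to the zigzag form (x<<1)^(x>>63) used for
+ signed fields. The String methods that follow are debug-only
+ pretty printers. */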
fmt.Sprintf("%v", this.Limit) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUStat{`, + `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, + `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUUsage{`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Throttle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Throttle{`, + `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, + `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, + `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryStat{`, + `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, + `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, + `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, + `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, + `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, + `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, + `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, + `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, + `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, + `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, + `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, + `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, + `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, + `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, + `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, + `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, + `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, + `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, + `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, + `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, + `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, + `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, + `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, + `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, + `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, + `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, + `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, + `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, + `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, + `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, + `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, + `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, + `Usage:` + 
strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryEntry{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryOomControl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryOomControl{`, + `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, + `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, + `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceBytesRecursive { + repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceBytesRecursive += "}" + repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServicedRecursive { + repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServicedRecursive += "}" + repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoQueuedRecursive { + repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoQueuedRecursive += "}" + repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceTimeRecursive { + repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceTimeRecursive += "}" + repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoWaitTimeRecursive { + repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoWaitTimeRecursive += "}" + repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoMergedRecursive { + repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoMergedRecursive += "}" + repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoTimeRecursive { + repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoTimeRecursive += "}" + repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" + for _, f := range this.SectorsRecursive { + repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForSectorsRecursive += "}" + s := strings.Join([]string{`&BlkIOStat{`, + `IoServiceBytesRecursive:` + 
repeatedStringForIoServiceBytesRecursive + `,`, + `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, + `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, + `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, + `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, + `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, + `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, + `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlkIOEntry{`, + `Op:` + fmt.Sprintf("%v", this.Op) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Major:` + fmt.Sprintf("%v", this.Major) + `,`, + `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrent := "[]*RdmaEntry{" + for _, f := range this.Current { + repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForCurrent += "}" + repeatedStringForLimit := "[]*RdmaEntry{" + for _, f := range this.Limit { + repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForLimit += "}" + s := strings.Join([]string{`&RdmaStat{`, + `Current:` + repeatedStringForCurrent + `,`, + `Limit:` + repeatedStringForLimit + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RdmaEntry{`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, + `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkStat{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, + `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, + `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, + `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, + `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, + `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, + `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, + `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupStats{`, + `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, + `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, + `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, + `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, + `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv 
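+ /* Unmarshal is the matching hand-rolled decoder: it reads a tag
+ varint, splits it into field number (wire>>3) and wire type
+ (wire&0x7), and switches on the field number. Unknown fields are
+ skipped via skipMetrics and kept verbatim in XXX_unrecognized;
+ truncated input yields io.ErrUnexpectedEOF, and a varint running
+ past 64 bits yields ErrIntOverflowMetrics. */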
:= reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) + if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pids == nil { + m.Pids = &PidsStat{} + } + if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &CPUStat{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory 
= &MemoryStat{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blkio == nil { + m.Blkio = &BlkIOStat{} + } + if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rdma == nil { + m.Rdma = &RdmaStat{} + } + if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = append(m.Network, &NetworkStat{}) + if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupStats == nil { + m.CgroupStats = &CgroupStats{} + } + if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + 
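+ /* Every length-delimited field is bounds-checked twice before the
+ decoder recurses: postIndex must not overflow to a negative value
+ and must not run past the end of the input. */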
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoryOomControl == nil { + m.MemoryOomControl = &MemoryOomControl{} + } + if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugetlbStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pagesize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &CPUUsage{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttling == nil { + m.Throttling = &Throttle{} + } + if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
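(Editor's note: the `fieldNum := int32(wire >> 3)` / `wireType := int(wire & 0x7)` split seen in every Unmarshal above decomposes the varint key that prefixes each field. A small sketch of that decomposition, with hypothetical names; wire type 0 is a varint, 1 fixed64, 2 length-delimited — used for nested messages such as CPUStat.Usage, whose byte length follows the key — and 5 fixed32.)

package main

import "fmt"

// splitKey decomposes a protobuf field key: the upper bits carry the field
// number, the low three bits the wire type. For wire type 2 the generated
// code then reads a varint length, computes postIndex = iNdEx + msglen, and
// recursively unmarshals data[iNdEx:postIndex] into the nested message.
func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	// Key 0x0A = field 1, wire type 2 — e.g. CPUStat.Usage above.
	f, w := splitKey(0x0A)
	fmt.Println(f, w) // 1 2
}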
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + m.Kernel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kernel |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + m.User = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.User |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PerCPU) == 0 { + m.PerCPU = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Throttle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Throttle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) + } + m.Periods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Periods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) + } + m.ThrottledPeriods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledPeriods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) + } + m.ThrottledTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
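(Editor's note: the PerCPU branch of CPUUsage above accepts both encodings of a repeated scalar — wire type 0 for a single element and wire type 2 for a packed run of varints — and pre-sizes the slice by counting terminator bytes. A minimal sketch of the packed path, under the assumption that the caller has already sliced out the length-delimited payload; decodePacked is a hypothetical name.)

package main

import (
	"errors"
	"fmt"
)

// decodePacked decodes a packed repeated uint64 payload: back-to-back varints
// with no per-element keys. Like the generated code, it first counts bytes
// without the continuation bit to pre-allocate the slice.
func decodePacked(payload []byte) ([]uint64, error) {
	count := 0
	for _, b := range payload {
		if b < 0x80 {
			count++
		}
	}
	out := make([]uint64, 0, count)
	var v uint64
	var shift uint
	for _, b := range payload {
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			out = append(out, v) // terminator byte: element complete
			v, shift = 0, 0
		} else {
			shift += 7
			if shift >= 64 {
				return nil, errors.New("varint overflow")
			}
		}
	}
	if shift != 0 || v != 0 {
		return nil, errors.New("truncated varint in packed field")
	}
	return out, nil
}

func main() {
	vals, err := decodePacked([]byte{0x01, 0xAC, 0x02, 0x7F}) // 1, 300, 127
	fmt.Println(vals, err)                                    // [1 300 127] <nil>
}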
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + m.Cache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) + } + m.RSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) + } + m.RSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) + } + m.MappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) + } + m.Dirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) + } + m.Writeback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Writeback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) + } + m.PgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) + } + m.PgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) + } + m.PgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) + } + m.PgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) + } + m.InactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) + } + m.ActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) + } + m.InactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) + } + m.ActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) + } + m.Unevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) + } + m.HierarchicalMemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) + } + m.HierarchicalSwapLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) + } + m.TotalCache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalCache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) + } + m.TotalRSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) + } + m.TotalRSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) + } + m.TotalMappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalMappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) + } + m.TotalDirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) + } + m.TotalWriteback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalWriteback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) + } + m.TotalPgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) + } + m.TotalPgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) + } + m.TotalPgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.TotalPgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) + } + m.TotalPgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) + } + m.TotalInactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) + } + m.TotalActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) + } + m.TotalInactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) + } + m.TotalActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) + } + m.TotalUnevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalUnevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &MemoryEntry{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &MemoryEntry{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &MemoryEntry{} + } + if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTCP == nil { + m.KernelTCP = &MemoryEntry{} + } + if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
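(Editor's note: every length-delimited branch above repeats the same defensive trio — reject a negative declared length, reject `postIndex < 0` (index arithmetic overflowed), and reject `postIndex > l` (payload runs past the buffer). A sketch of why both of the latter checks are needed, with hypothetical names; the overflow case matters on hostile input where a huge length wraps the signed index.)

package main

import (
	"errors"
	"fmt"
	"math"
)

// checkedAdvance mirrors the bounds discipline of the generated code: a
// declared length must neither overflow the index arithmetic nor run past
// the end of the buffer.
func checkedAdvance(index, length, buflen int) (int, error) {
	if length < 0 {
		return 0, errors.New("negative length") // ErrInvalidLengthMetrics analogue
	}
	post := index + length
	if post < 0 { // signed wrap-around from an absurd length
		return 0, errors.New("length overflows index")
	}
	if post > buflen {
		return 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF analogue
	}
	return post, nil
}

func main() {
	_, err := checkedAdvance(10, math.MaxInt-5, 16)
	fmt.Println(err) // length overflows index
	_, err = checkedAdvance(10, 100, 16)
	fmt.Println(err) // unexpected EOF
}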
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) + } + m.OomKillDisable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKillDisable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) + } + m.UnderOom = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnderOom |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) + } + m.OomKill = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKill |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) + if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) + if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) + if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) + if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) + if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) + if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) + if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) + if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Op = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + m.Major = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Major |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + m.Minor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minor |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Current = append(m.Current, &RdmaEntry{}) + if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limit = append(m.Limit, &RdmaEntry{}) + if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
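(Editor's note: repeated message fields — BlkIOStat's *Recursive slices and RdmaStat's Current/Limit above — are not length-prefixed as a group. Each occurrence of the field key contributes exactly one element, hence the `append(m.Current, &RdmaEntry{})` followed by unmarshaling into the last slot. A minimal sketch of that pattern; `entry`, `unmarshal`, and `appendOccurrence` are stand-in names, not the vendored API.)

package main

import "fmt"

// entry stands in for a nested message such as BlkIOEntry or RdmaEntry.
type entry struct{ payload []byte }

func (e *entry) unmarshal(b []byte) error { e.payload = b; return nil }

// appendOccurrence appends a fresh element for one occurrence of a repeated
// message field, then decodes the payload into it — the same append-then-
// unmarshal-into-last-slot shape as the generated code.
func appendOccurrence(list []*entry, payload []byte) ([]*entry, error) {
	list = append(list, &entry{})
	if err := list[len(list)-1].unmarshal(payload); err != nil {
		return nil, err
	}
	return list, nil
}

func main() {
	var cur []*entry
	cur, _ = appendOccurrence(cur, []byte{0x08, 0x01})
	cur, _ = appendOccurrence(cur, []byte{0x08, 0x02})
	fmt.Println(len(cur)) // 2 — one element per field occurrence
}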
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) + } + m.HcaHandles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaHandles |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) + } + m.HcaObjects = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaObjects |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + m.RxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) + } + m.RxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + m.RxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) + } + m.RxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + m.TxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) + } + m.TxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + m.TxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) + } + m.TxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) + } + m.NrSleeping = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrSleeping |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) + } + m.NrRunning = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrRunning |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) + } + m.NrStopped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrStopped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) + } + m.NrUninterruptible = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrUninterruptible |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 
5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) + } + m.NrIoWait = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrIoWait |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt new file mode 100644 index 00000000000..e476cea6412 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt @@ -0,0 +1,790 @@ +file { + name: "github.com/containerd/cgroups/stats/v1/metrics.proto" + package: "io.containerd.cgroups.v1" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Metrics" + field { + name: "hugetlb" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.HugetlbStat" + json_name: "hugetlb" + } + field { + name: "pids" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.PidsStat" + json_name: "pids" + } + field { + name: "cpu" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUStat" + options { + 65004: "CPU" + } + json_name: "cpu" + } + field { + name: "memory" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryStat" + json_name: "memory" + } + field 
{ + name: "blkio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOStat" + json_name: "blkio" + } + field { + name: "rdma" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaStat" + json_name: "rdma" + } + field { + name: "network" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.NetworkStat" + json_name: "network" + } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } + field { + name: "memory_oom_control" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryOomControl" + json_name: "memoryOomControl" + } + } + message_type { + name: "HugetlbStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + field { + name: "pagesize" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "pagesize" + } + } + message_type { + name: "PidsStat" + field { + name: "current" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + } + message_type { + name: "CPUStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUUsage" + json_name: "usage" + } + field { + name: "throttling" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.Throttle" + json_name: "throttling" + } + } + message_type { + name: "CPUUsage" + field { + name: "total" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "total" + } + field { + name: "kernel" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "kernel" + } + field { + name: "user" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "user" + } + field { + name: "per_cpu" + number: 4 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + 65004: "PerCPU" + } + json_name: "perCpu" + } + } + message_type { + name: "Throttle" + field { + name: "periods" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "periods" + } + field { + name: "throttled_periods" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledPeriods" + } + field { + name: "throttled_time" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledTime" + } + } + message_type { + name: "MemoryStat" + field { + name: "cache" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "cache" + } + field { + name: "rss" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSS" + } + json_name: "rss" + } + field { + name: "rss_huge" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSSHuge" + } + json_name: "rssHuge" + } + field { + name: "mapped_file" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "mappedFile" + } + field { + name: "dirty" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "dirty" + } + field { + name: "writeback" + number: 6 + label: 
LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "writeback" + } + field { + name: "pg_pg_in" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgIn" + } + field { + name: "pg_pg_out" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgOut" + } + field { + name: "pg_fault" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgFault" + } + field { + name: "pg_maj_fault" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgMajFault" + } + field { + name: "inactive_anon" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveAnon" + } + field { + name: "active_anon" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeAnon" + } + field { + name: "inactive_file" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveFile" + } + field { + name: "active_file" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeFile" + } + field { + name: "unevictable" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "unevictable" + } + field { + name: "hierarchical_memory_limit" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalMemoryLimit" + } + field { + name: "hierarchical_swap_limit" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalSwapLimit" + } + field { + name: "total_cache" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalCache" + } + field { + name: "total_rss" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSS" + } + json_name: "totalRss" + } + field { + name: "total_rss_huge" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSSHuge" + } + json_name: "totalRssHuge" + } + field { + name: "total_mapped_file" + number: 21 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalMappedFile" + } + field { + name: "total_dirty" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalDirty" + } + field { + name: "total_writeback" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalWriteback" + } + field { + name: "total_pg_pg_in" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgIn" + } + field { + name: "total_pg_pg_out" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgOut" + } + field { + name: "total_pg_fault" + number: 26 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgFault" + } + field { + name: "total_pg_maj_fault" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgMajFault" + } + field { + name: "total_inactive_anon" + number: 28 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveAnon" + } + field { + name: "total_active_anon" + number: 29 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveAnon" + } + field { + name: "total_inactive_file" + number: 30 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveFile" + } + field { + name: "total_active_file" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveFile" + } + field { + name: "total_unevictable" + number: 32 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalUnevictable" + } + field { + name: "usage" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "usage" + } + field 
{ + name: "swap" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "swap" + } + field { + name: "kernel" + number: 35 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "kernel" + } + field { + name: "kernel_tcp" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + options { + 65004: "KernelTCP" + } + json_name: "kernelTcp" + } + } + message_type { + name: "MemoryEntry" + field { + name: "limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + field { + name: "usage" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + } + message_type { + name: "MemoryOomControl" + field { + name: "oom_kill_disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKillDisable" + } + field { + name: "under_oom" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "underOom" + } + field { + name: "oom_kill" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKill" + } + } + message_type { + name: "BlkIOStat" + field { + name: "io_service_bytes_recursive" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceBytesRecursive" + } + field { + name: "io_serviced_recursive" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServicedRecursive" + } + field { + name: "io_queued_recursive" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioQueuedRecursive" + } + field { + name: "io_service_time_recursive" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceTimeRecursive" + } + field { + name: "io_wait_time_recursive" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioWaitTimeRecursive" + } + field { + name: "io_merged_recursive" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioMergedRecursive" + } + field { + name: "io_time_recursive" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioTimeRecursive" + } + field { + name: "sectors_recursive" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "sectorsRecursive" + } + } + message_type { + name: "BlkIOEntry" + field { + name: "op" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "op" + } + field { + name: "device" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "major" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "major" + } + field { + name: "minor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "minor" + } + field { + name: "value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + message_type { + name: 
"RdmaStat" + field { + name: "current" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "limit" + } + } + message_type { + name: "RdmaEntry" + field { + name: "device" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "hca_handles" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaHandles" + } + field { + name: "hca_objects" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaObjects" + } + } + message_type { + name: "NetworkStat" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "rx_bytes" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxBytes" + } + field { + name: "rx_packets" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxPackets" + } + field { + name: "rx_errors" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxErrors" + } + field { + name: "rx_dropped" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxDropped" + } + field { + name: "tx_bytes" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txBytes" + } + field { + name: "tx_packets" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txPackets" + } + field { + name: "tx_errors" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txErrors" + } + field { + name: "tx_dropped" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txDropped" + } + } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } + syntax: "proto3" +} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto new file mode 100644 index 00000000000..b3f6cc37d8c --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package io.containerd.cgroups.v1; + +import "gogoproto/gogo.proto"; + +message Metrics { + repeated HugetlbStat hugetlb = 1; + PidsStat pids = 2; + CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; + MemoryStat memory = 4; + BlkIOStat blkio = 5; + RdmaStat rdma = 6; + repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; + MemoryOomControl memory_oom_control = 9; +} + +message HugetlbStat { + uint64 usage = 1; + uint64 max = 2; + uint64 failcnt = 3; + string pagesize = 4; +} + +message PidsStat { + uint64 current = 1; + uint64 limit = 2; +} + +message CPUStat { + CPUUsage usage = 1; + Throttle throttling = 2; +} + +message CPUUsage { + // values in nanoseconds + uint64 total = 1; + uint64 kernel = 2; + uint64 user = 3; + repeated uint64 per_cpu = 4 
[(gogoproto.customname) = "PerCPU"]; + +} + +message Throttle { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message MemoryStat { + uint64 cache = 1; + uint64 rss = 2 [(gogoproto.customname) = "RSS"]; + uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; + uint64 mapped_file = 4; + uint64 dirty = 5; + uint64 writeback = 6; + uint64 pg_pg_in = 7; + uint64 pg_pg_out = 8; + uint64 pg_fault = 9; + uint64 pg_maj_fault = 10; + uint64 inactive_anon = 11; + uint64 active_anon = 12; + uint64 inactive_file = 13; + uint64 active_file = 14; + uint64 unevictable = 15; + uint64 hierarchical_memory_limit = 16; + uint64 hierarchical_swap_limit = 17; + uint64 total_cache = 18; + uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; + uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; + uint64 total_mapped_file = 21; + uint64 total_dirty = 22; + uint64 total_writeback = 23; + uint64 total_pg_pg_in = 24; + uint64 total_pg_pg_out = 25; + uint64 total_pg_fault = 26; + uint64 total_pg_maj_fault = 27; + uint64 total_inactive_anon = 28; + uint64 total_active_anon = 29; + uint64 total_inactive_file = 30; + uint64 total_active_file = 31; + uint64 total_unevictable = 32; + MemoryEntry usage = 33; + MemoryEntry swap = 34; + MemoryEntry kernel = 35; + MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; + +} + +message MemoryEntry { + uint64 limit = 1; + uint64 usage = 2; + uint64 max = 3; + uint64 failcnt = 4; +} + +message MemoryOomControl { + uint64 oom_kill_disable = 1; + uint64 under_oom = 2; + uint64 oom_kill = 3; +} + +message BlkIOStat { + repeated BlkIOEntry io_service_bytes_recursive = 1; + repeated BlkIOEntry io_serviced_recursive = 2; + repeated BlkIOEntry io_queued_recursive = 3; + repeated BlkIOEntry io_service_time_recursive = 4; + repeated BlkIOEntry io_wait_time_recursive = 5; + repeated BlkIOEntry io_merged_recursive = 6; + repeated BlkIOEntry io_time_recursive = 7; + repeated BlkIOEntry sectors_recursive = 8; +} + +message BlkIOEntry { + string op = 1; + string device = 2; + uint64 major = 3; + uint64 minor = 4; + uint64 value = 5; +} + +message RdmaStat { + repeated RdmaEntry current = 1; + repeated RdmaEntry limit = 2; +} + +message RdmaEntry { + string device = 1; + uint32 hca_handles = 2; + uint32 hca_objects = 3; +} + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} + +// CgroupStats exports per-cgroup statistics. +message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 00000000000..37b6a7d1c8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,68 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
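The hand-rolled decode loops in the generated metrics.pb.go earlier in this patch all reduce to base-128 varint decoding: seven payload bits per byte, least-significant group first, high bit as a continuation flag. A minimal standalone sketch of that loop (illustrative only, not part of the vendored code):

    package main

    import (
    	"fmt"
    	"io"
    )

    // decodeUvarint mirrors the shift/mask loop used by skipMetrics above.
    func decodeUvarint(b []byte) (v uint64, n int, err error) {
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 64 {
    			return 0, 0, fmt.Errorf("varint overflows uint64")
    		}
    		if n >= len(b) {
    			return 0, 0, io.ErrUnexpectedEOF
    		}
    		c := b[n]
    		n++
    		v |= uint64(c&0x7F) << shift
    		if c < 0x80 { // high bit clear: last byte of this varint
    			return v, n, nil
    		}
    	}
    }

    func main() {
    	v, n, _ := decodeUvarint([]byte{0xAC, 0x02})
    	fmt.Println(v, n) // 300 2
    }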
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +const ( + // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to + // ensure the formatted time is always the same number of characters. + RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + + // TextFormat represents the text logging format + TextFormat = "text" + + // JSONFormat represents the JSON logging format + JSONFormat = "json" +) + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go new file mode 100644 index 00000000000..6656465efbc --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go @@ -0,0 +1,62 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package userns + +import ( + "bufio" + "fmt" + "os" + "sync" +) + +var ( + inUserNS bool + nsOnce sync.Once +) + +// RunningInUserNS detects whether we are currently running in a user namespace. +// Originally copied from github.com/lxc/lxd/shared/util.go +func RunningInUserNS() bool { + nsOnce.Do(func() { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. 
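The log package vendored above is a thin convenience layer: a *logrus.Entry travels inside a context.Context under a private key type, and G/GetLogger falls back to the standard entry L when nothing was attached. A usage sketch, with the field name invented for illustration and the import path as vendored in this patch:

    package main

    import (
    	"context"

    	"github.com/containerd/containerd/log"
    )

    func main() {
    	// Attach a tagged logger to the context...
    	ctx := log.WithLogger(context.Background(), log.L.WithField("module", "pull"))

    	// ...and retrieve it anywhere downstream; G(ctx) returns log.L
    	// unchanged if no logger was ever attached to this context.
    	log.G(ctx).Info("resolving image")
    }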
+ */ + if a == 0 && b == 0 && c == 4294967295 { + return + } + inUserNS = true + }) + return inUserNS +} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go new file mode 100644 index 00000000000..aab756fd2a6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -0,0 +1,25 @@ +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package userns + +// RunningInUserNS is a stub for non-Linux systems +// Always returns false +func RunningInUserNS() bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 00000000000..c7657e1869b --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,193 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). 
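RunningInUserNS, shown above, keys off /proc/self/uid_map: the initial namespace maps the full range ("0 0 4294967295"), while anything narrower indicates a user namespace. A small caller sketch, assuming the import path as vendored here:

    package main

    import (
    	"fmt"

    	"github.com/containerd/containerd/pkg/userns"
    )

    func main() {
    	// On non-Linux builds the stub above always reports false.
    	if userns.RunningInUserNS() {
    		fmt.Println("inside a user namespace; id mappings may be limited")
    	} else {
    		fmt.Println("initial user namespace")
    	}
    }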
+func platformVector(platform specs.Platform) []specs.Platform { + vector := []specs.Platform{platform} + + switch platform.Architecture { + case "amd64": + vector = append(vector, specs.Platform{ + Architecture: "386", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: platform.Variant, + }) + case "arm": + if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) + } + } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" + } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) + } + + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. +// +// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 +// For arm/v7, will also match arm/v6 and arm/v5 +// For arm/v6, will also match arm/v5 +// For amd64, will also match 386 +func Only(platform specs.Platform) MatchComparer { + return Ordered(platformVector(Normalize(platform))...) +} + +// OnlyStrict returns a match comparer for a single platform. +// +// Unlike Only, OnlyStrict does not match sub platforms. +// So, "arm/vN" will not match "arm/vM" where M < N, +// and "amd64" will not also match "386". +// +// OnlyStrict matches non-canonical forms. +// So, "arm64" matches "arm64/v8". +func OnlyStrict(platform specs.Platform) MatchComparer { + return Ordered(Normalize(platform)) +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in the order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering.
+var All MatchComparer = allPlatformComparer{} + +type orderedPlatformComparer struct { + matchers []Matcher +} + +func (c orderedPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } + } + return false +} + +type anyPlatformComparer struct { + matchers []Matcher +} + +func (c anyPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { + var p1m, p2m bool + for _, m := range c.matchers { + if !p1m && m.Match(p1) { + p1m = true + } + if !p2m && m.Match(p2) { + p2m = true + } + if p1m && p2m { + return false + } + } + // If one matches and the other does not, sort the match first + return p1m && !p2m +} + +type allPlatformComparer struct{} + +func (allPlatformComparer) Match(specs.Platform) bool { + return true +} + +func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go new file mode 100644 index 00000000000..4a7177e3138 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "os" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +// cpuVariantValue presents the ARM instruction set architecture, e.g. v7, v8. +// Don't use this value directly; call cpuVariant() instead. +var cpuVariantValue string + +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + if isArmArch(runtime.GOARCH) { + cpuVariantValue = getCPUVariant() + } + }) + return cpuVariantValue +} + +// For Linux, the kernel has already detected the ABI, ISA and Features, +// so we don't need to access the ARM registers to detect platform information +// ourselves. We can simply parse this information from /proc/cpuinfo. +func getCPUInfo(pattern string) (info string, err error) { + if !isLinuxOS(runtime.GOOS) { + return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Parse /proc/cpuinfo line by line. For an SMP SoC, parsing + // the first core is enough.
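To see the fallback vector in action: a matcher built with Only for linux/arm/v7 also accepts the older v6 and v5 variants, while OnlyStrict does not. A sketch under the import paths as vendored in this patch:

    package main

    import (
    	"fmt"

    	"github.com/containerd/containerd/platforms"
    	specs "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
    	m := platforms.Only(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})
    	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})) // true: v7 falls back to v6/v5
    	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "arm64"}))              // false: no upward fallback

    	s := platforms.OnlyStrict(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})
    	fmt.Println(s.Match(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})) // false: strict, no sub-platforms
    }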
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 00000000000..6ede94061eb --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These functions are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM.
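getCPUInfo's parsing, shown above, is a plain key/value scan over /proc/cpuinfo. A self-contained sketch against canned input (the sample lines are assumptions for illustration, not real output from any particular board):

    package main

    import (
    	"bufio"
    	"fmt"
    	"strings"
    )

    func main() {
    	sample := "processor\t: 0\nmodel name\t: ARMv7 Processor rev 5 (v7l)\nCPU architecture: 7\n"
    	scanner := bufio.NewScanner(strings.NewReader(sample))
    	for scanner.Scan() {
    		kv := strings.SplitN(scanner.Text(), ":", 2)
    		if len(kv) == 2 && strings.EqualFold(strings.TrimSpace(kv[0]), "CPU architecture") {
    			// getCPUVariant would map "7" to "v7" (or "v6" on the
    			// Raspberry Pi ARMv6 kernels noted above).
    			fmt.Println(strings.TrimSpace(kv[1])) // "7"
    		}
    	}
    }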
+// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 00000000000..cb77fbc9f7b --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// DefaultStrict returns strict form of Default. +func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 00000000000..e8a7d5ffa0d --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 00000000000..0c380e3b7c1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,81 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + imagespec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +type matchComparer struct { + defaults Matcher + osVersionPrefix string +} + +// Match matches platform with the same windows major, minor +// and build version. +func (m matchComparer) Match(p imagespec.Platform) bool { + if m.defaults.Match(p) { + // TODO(windows): Figure out whether OSVersion is deprecated. + return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) + } + return false +} + +// Less sorts matched platforms in front of other platforms. +// For matched platforms, it puts platforms with larger revision +// number in front. +func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { + m1, m2 := m.Match(p1), m.Match(p2) + if m1 && m2 { + r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} + +func revision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +// Default returns the current platform's default platform specification. +func Default() MatchComparer { + major, minor, build := windows.RtlGetNtVersionNumbers() + return matchComparer{ + defaults: Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 00000000000..088bdea0508 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,278 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. Since extracting an image's +// platform is a little more involved, we'll use an example against the +// platform default: +// +// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// +// This can be composed in loops for resolving runtimes or used as a filter for +// fetching and selecting images. +// +// More details of the specifier syntax and platform spec follow. +// +// Declaring Platform Support +// +// Components that have strict platform requirements should use the OCI +// platform specification to declare their support. Typically, these will be +// images and runtimes, which should declare specifically which platforms they +// support. This looks roughly as follows: +// +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } +// +// Most images and runtimes should at least set Architecture and OS, according +// to their GOARCH and GOOS values, respectively (follow the OCI image +// specification when in doubt). ARM should set the variant under certain +// conditions, which are outlined below. +// +// Platform Specifiers +// +// While the OCI platform specifications provide a tool for components to +// specify structured information, user input typically doesn't need the full +// context and much can be inferred. To solve this problem, we introduced +// "specifiers". A specifier has the format +// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the +// operating system or the architecture or both. +// +// An example of a common specifier is `linux/amd64`. If the host has a default +// runtime that matches this, the user can simply provide the component that +// matters. For example, if an image provides amd64 and arm64 support, the +// operating system `linux` can be inferred, so they only have to provide +// `arm64` or `amd64`. Similar behavior is implemented for operating systems, +// where the architecture may be known but a runtime may support images from +// different operating systems. +// +// Normalization +// +// Because not all users are familiar with the way the Go runtime represents +// platforms, several normalizations have been provided to make this package +// easier to use.
+// +// The following are performed for architectures: +// +// Value Normalized +// aarch64 arm64 +// armhf arm +// armel arm/v6 +// i386 386 +// x86_64 amd64 +// x86-64 amd64 +// +// We also normalize the operating system `macos` to `darwin`. +// +// ARM Support +// +// For the ARM architecture, the Variant field is used to qualify the arm +// version. The most common arm version, v7, is represented without the variant +// unless it is explicitly provided. This is treated as equivalent to armhf. A +// previous architecture, armel, will be normalized to arm/v6. +// +// While these normalizations are provided, their support on arm platforms has +// not yet been fully implemented and tested. +package platforms + +import ( + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/containerd/containerd/errdefs" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) +) + +// Matcher matches platform specifications, provided by an image or runtime. +type Matcher interface { + Match(platform specs.Platform) bool +} + +// NewMatcher returns a simple matcher based on the provided platform +// specification. The returned matcher only looks for equality based on os, +// architecture and variant. +// +// One may implement their own matcher if this doesn't provide the required +// functionality. +// +// Applications should opt to use `Match` over directly parsing specifiers. +func NewMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: Normalize(platform), + } +} + +type matcher struct { + specs.Platform +} + +func (m *matcher) Match(platform specs.Platform) bool { + normalized := Normalize(platform) + return m.OS == normalized.OS && + m.Architecture == normalized.Architecture && + m.Variant == normalized.Variant +} + +func (m *matcher) String() string { + return Format(m.Platform) +} + +// Parse parses the platform specifier syntax into a platform declaration. +// +// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`. +// The minimum required information for a platform specifier is the operating +// system or architecture. If there is only a single string (no slashes), the +// value will be matched against the known set of operating systems, then fall +// back to the known set of architectures. The missing component will be +// inferred based on the local environment. +func Parse(specifier string) (specs.Platform, error) { + if strings.Contains(specifier, "*") { + // TODO(stevvooe): need to work out exact wildcard handling + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + } + + parts := strings.Split(specifier, "/") + + for _, part := range parts { + if !specifierRe.MatchString(part) { + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + } + } + + var p specs.Platform + switch len(parts) { + case 1: + // in this case, we will test that the value might be an OS, then look + // it up. If it is not known, we'll treat it as an architecture. Since + // we have very little information about the platform here, we are + // going to be a little more strict if we don't know about the argument + // value.
+ p.OS = normalizeOS(parts[0]) + if isKnownOS(p.OS) { + // picks a default architecture + p.Architecture = runtime.GOARCH + if p.Architecture == "arm" && cpuVariant() != "v7" { + p.Variant = cpuVariant() + } + + return p, nil + } + + p.Architecture, p.Variant = normalizeArch(parts[0], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + if isKnownArch(p.Architecture) { + p.OS = runtime.GOOS + return p, nil + } + + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + case 2: + // In this case, we treat it as a regular os/arch pair. We don't care + // about whether or not we know of the platform. + p.OS = normalizeOS(parts[0]) + p.Architecture, p.Variant = normalizeArch(parts[1], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + + return p, nil + case 3: + // we have a fully specified variant, this is rare + p.OS = normalizeOS(parts[0]) + p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) + if p.Architecture == "arm64" && p.Variant == "" { + p.Variant = "v8" + } + + return p, nil + } + + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) +} + +// MustParse is like Parse but panics if the specifier cannot be parsed. +// Simplifies initialization of global variables. +func MustParse(specifier string) specs.Platform { + p, err := Parse(specifier) + if err != nil { + panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) + } + return p +} + +// Format returns a string specifier from the provided platform specification. +func Format(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) +} + +func joinNotEmpty(s ...string) string { + var ss []string + for _, s := range s { + if s == "" { + continue + } + + ss = append(ss, s) + } + + return strings.Join(ss, "/") +} + +// Normalize validates and translates the platform to the canonical value. +// +// For example, if "Aarch64" is encountered, we change it to "arm64" or if +// "x86_64" is encountered, it becomes "amd64". +func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + + // these fields are deprecated, remove them + platform.OSFeatures = nil + platform.OSVersion = "" + + return platform +} diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go new file mode 100644 index 00000000000..28d6c2cabc6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/epoll.go @@ -0,0 +1,33 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
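Putting Parse, Format and Normalize together makes the normalization table from the package documentation directly observable. A sketch only, with the import path as vendored in this patch:

    package main

    import (
    	"fmt"

    	"github.com/containerd/containerd/platforms"
    )

    func main() {
    	// armel normalizes to arm/v6, per the table above.
    	fmt.Println(platforms.Format(platforms.MustParse("linux/armel"))) // linux/arm/v6

    	// macos/x86_64 normalizes to darwin/amd64.
    	fmt.Println(platforms.Format(platforms.MustParse("macos/x86_64"))) // darwin/amd64

    	// A bare architecture is recognized and the OS inferred from the
    	// local environment (runtime.GOOS).
    	fmt.Println(platforms.MustParse("aarch64").Architecture) // arm64
    }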
+*/ + +package sys + +import "golang.org/x/sys/unix" + +// EpollCreate1 is an alias for unix.EpollCreate1 +// Deprecated: use golang.org/x/sys/unix.EpollCreate1 +var EpollCreate1 = unix.EpollCreate1 + +// EpollCtl is an alias for unix.EpollCtl +// Deprecated: use golang.org/x/sys/unix.EpollCtl +var EpollCtl = unix.EpollCtl + +// EpollWait is an alias for unix.EpollWait +// Deprecated: use golang.org/x/sys/unix.EpollWait +var EpollWait = unix.EpollWait diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go new file mode 100644 index 00000000000..db3cf702f49 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/fds.go @@ -0,0 +1,34 @@ +// +build !windows,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "io/ioutil" + "path/filepath" + "strconv" +) + +// GetOpenFds returns the number of open fds for the process provided by pid +func GetOpenFds(pid int) (int, error) { + dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + if err != nil { + return -1, err + } + return len(dirs), nil +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go new file mode 100644 index 00000000000..d8329af9fbc --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import "os" + +// ForceRemoveAll on unix is just a wrapper for os.RemoveAll +func ForceRemoveAll(path string) error { + return os.RemoveAll(path) +} + +// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go new file mode 100644 index 00000000000..a9198ef399a --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -0,0 +1,330 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
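GetOpenFds above simply counts the entries under /proc/<pid>/fd, so it is only meaningful on procfs systems (the build tag excludes windows and darwin). A minimal caller sketch:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/containerd/containerd/sys"
    )

    func main() {
    	n, err := sys.GetOpenFds(os.Getpid())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("open file descriptors:", n)
    }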
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/Microsoft/hcsshim" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return mkdirall(path, true) +} + +// MkdirAll implementation that is volume path aware for Windows. It can be used +// as a drop-in replacement for os.MkdirAll() +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, adminAndLocalSystem bool) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], adminAndLocalSystem) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if adminAndLocalSystem { + err = mkdirWithACL(path) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. +// +// This is a modified and combined version of os.Mkdir and windows.Mkdir +// in golang to cater for creating a directory with an ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System.
+func mkdirWithACL(name string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := windows.SecurityDescriptorFromString(SddlAdministratorsLocalSystem) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = sd + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows +// and uses hcsshim to unmount and delete container layers contained therein, in the correct order, +// when passed a containerd root data directory (i.e. the `--root` directory for containerd). +func ForceRemoveAll(path string) error { + // snapshots/windows/windows.go init() + const snapshotPlugin = "io.containerd.snapshotter.v1" + "." 
+ "windows" + // snapshots/windows/windows.go NewSnapshotter() + snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") + if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { + if err := cleanupWCOWLayers(snapshotDir); err != nil { + return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + } + } + + return os.RemoveAll(path) +} + +func cleanupWCOWLayers(root string) error { + // See snapshots/windows/windows.go getSnapshotDir() + var layerNums []int + if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if path != root && info.IsDir() { + if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { + layerNums = append(layerNums, layerNum) + } else { + return err + } + return filepath.SkipDir + } + + return nil + }); err != nil { + return err + } + + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + + for _, layerNum := range layerNums { + if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { + return err + } + } + + return nil +} + +func cleanupWCOWLayer(layerPath string) error { + info := hcsshim.DriverInfo{ + HomeDir: filepath.Dir(layerPath), + } + + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. + if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { + if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST { + return errors.Wrapf(err, "failed to unprepare %s", layerPath) + } + } + + if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { + return errors.Wrapf(err, "failed to deactivate %s", layerPath) + } + + if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { + return errors.Wrapf(err, "failed to destroy %s", layerPath) + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/sys/mount_linux.go b/vendor/github.com/containerd/containerd/sys/mount_linux.go new file mode 100644 index 00000000000..a21045529ae --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/mount_linux.go @@ -0,0 +1,145 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "runtime" + "syscall" + "unsafe" + + "github.com/containerd/containerd/log" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// FMountat performs mount from the provided directory. 
+func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error {
+	var (
+		sourceP, targetP, fstypeP, dataP *byte
+		pid                              uintptr
+		err                              error
+		errno, status                    syscall.Errno
+	)
+
+	sourceP, err = syscall.BytePtrFromString(source)
+	if err != nil {
+		return err
+	}
+
+	targetP, err = syscall.BytePtrFromString(target)
+	if err != nil {
+		return err
+	}
+
+	fstypeP, err = syscall.BytePtrFromString(fstype)
+	if err != nil {
+		return err
+	}
+
+	if data != "" {
+		dataP, err = syscall.BytePtrFromString(data)
+		if err != nil {
+			return err
+		}
+	}
+
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	var pipefds [2]int
+	if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil {
+		return errors.Wrap(err, "failed to open pipe")
+	}
+
+	defer func() {
+		// close both ends of the pipe in a deferred function, since the open
+		// file descriptor table is shared with the child
+		syscall.Close(pipefds[0])
+		syscall.Close(pipefds[1])
+	}()
+
+	pid, errno = forkAndMountat(dirfd,
+		uintptr(unsafe.Pointer(sourceP)),
+		uintptr(unsafe.Pointer(targetP)),
+		uintptr(unsafe.Pointer(fstypeP)),
+		flags,
+		uintptr(unsafe.Pointer(dataP)),
+		pipefds[1],
+	)
+
+	if errno != 0 {
+		return errors.Wrap(errno, "failed to fork thread")
+	}
+
+	defer func() {
+		_, err := unix.Wait4(int(pid), nil, 0, nil)
+		for err == syscall.EINTR {
+			_, err = unix.Wait4(int(pid), nil, 0, nil)
+		}
+
+		if err != nil {
+			log.L.WithError(err).Debugf("failed to find pid=%d process", pid)
+		}
+	}()
+
+	_, _, errno = syscall.RawSyscall(syscall.SYS_READ,
+		uintptr(pipefds[0]),
+		uintptr(unsafe.Pointer(&status)),
+		unsafe.Sizeof(status))
+	if errno != 0 {
+		return errors.Wrap(errno, "failed to read pipe")
+	}
+
+	if status != 0 {
+		return errors.Wrap(status, "failed to mount")
+	}
+
+	return nil
+}
+
+// forkAndMountat will fork a thread, change the working dir and mount.
+//
+// precondition: the runtime OS thread must be locked.
+func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) {
+
+	// block signals during clone
+	beforeFork()
+
+	// the cloned thread shares the open file descriptor table, but the thread
+	// will never be reused by the runtime.
+	pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0)
+	if errno != 0 || pid != 0 {
+		// restore all signals
+		afterFork()
+		return
+	}
+
+	// restore all signals
+	afterForkInChild()
+
+	// change working dir
+	_, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0)
+	if errno != 0 {
+		goto childerr
+	}
+	_, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0)
+
+childerr:
+	_, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno))
+	syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0)
+	panic("unreachable")
+}
diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go
new file mode 100644
index 00000000000..82a347c6f72
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/oom_linux.go
@@ -0,0 +1,83 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sys
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/pkg/userns"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// OOMScoreAdjMin is from OOM_SCORE_ADJ_MIN https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L9
+	OOMScoreAdjMin = -1000
+	// OOMScoreAdjMax is from OOM_SCORE_ADJ_MAX https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L10
+	OOMScoreAdjMax = 1000
+)
+
+// AdjustOOMScore sets the oom score for the provided pid. If the provided score
+// is out of range (-1000 to 1000), it is clipped to the min/max value.
+func AdjustOOMScore(pid, score int) error {
+	if score > OOMScoreAdjMax {
+		score = OOMScoreAdjMax
+	} else if score < OOMScoreAdjMin {
+		score = OOMScoreAdjMin
+	}
+	return SetOOMScore(pid, score)
+}
+
+// SetOOMScore sets the oom score for the provided pid
+func SetOOMScore(pid, score int) error {
+	if score > OOMScoreAdjMax || score < OOMScoreAdjMin {
+		return fmt.Errorf("value out of range (%d): OOM score must be between %d and %d", score, OOMScoreAdjMin, OOMScoreAdjMax)
+	}
+	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+	f, err := os.OpenFile(path, os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
+		if os.IsPermission(err) && (!runningPrivileged() || userns.RunningInUserNS()) {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// GetOOMScoreAdj gets the oom score for a process. It returns 0 (zero) if either
+// no oom score is set, or a score is set to 0.
+func GetOOMScoreAdj(pid int) (int, error) {
+	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(strings.TrimSpace(string(data)))
+}
+
+// runningPrivileged returns true if the effective user ID of the
+// calling process is 0
+func runningPrivileged() bool {
+	return unix.Geteuid() == 0
+}
diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go
new file mode 100644
index 00000000000..f5d7e9786b8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go
@@ -0,0 +1,48 @@
+// +build !linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sys
+
+const (
+	// OOMScoreMaxKillable is not implemented on non-Linux platforms
+	OOMScoreMaxKillable = 0
+	// OOMScoreAdjMax is not implemented on non-Linux platforms
+	OOMScoreAdjMax = 0
+)
+
+// AdjustOOMScore sets the oom score for the provided pid.
If the provided score
+// is out of range (-1000 to 1000), it is clipped to the min/max value.
+//
+// Not implemented on non-Linux platforms
+func AdjustOOMScore(pid, score int) error {
+	return nil
+}
+
+// SetOOMScore sets the oom score for the process
+//
+// Not implemented on non-Linux platforms
+func SetOOMScore(pid, score int) error {
+	return nil
+}
+
+// GetOOMScoreAdj gets the oom score for a process
+//
+// Not implemented on non-Linux platforms
+func GetOOMScoreAdj(pid int) (int, error) {
+	return 0, nil
+}
diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go
new file mode 100644
index 00000000000..b67cc1fa3a7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go
@@ -0,0 +1,80 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sys
+
+import (
+	"net"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+// CreateUnixSocket creates a unix socket and returns the listener
+func CreateUnixSocket(path string) (net.Listener, error) {
+	// BSDs have a 104 limit
+	if len(path) > 104 {
+		return nil, errors.Errorf("%q: unix socket path too long (> 104)", path)
+	}
+	if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil {
+		return nil, err
+	}
+	if err := unix.Unlink(path); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	return net.Listen("unix", path)
+}
+
+// GetLocalListener returns a listener out of a unix socket.
+func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
+	// Ensure parent directory is created
+	if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil {
+		return nil, err
+	}
+
+	l, err := CreateUnixSocket(path)
+	if err != nil {
+		return l, err
+	}
+
+	if err := os.Chmod(path, 0660); err != nil {
+		l.Close()
+		return nil, err
+	}
+
+	if err := os.Chown(path, uid, gid); err != nil {
+		l.Close()
+		return nil, err
+	}
+
+	return l, nil
+}
+
+func mkdirAs(path string, uid, gid int) error {
+	if _, err := os.Stat(path); !os.IsNotExist(err) {
+		return err
+	}
+
+	if err := os.MkdirAll(path, 0770); err != nil {
+		return err
+	}
+
+	return os.Chown(path, uid, gid)
+}
diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go
new file mode 100644
index 00000000000..3ee7679b49b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go
@@ -0,0 +1,32 @@
+// +build windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sys
+
+import (
+	"net"
+
+	"github.com/Microsoft/go-winio"
+)
+
+// GetLocalListener returns a net.Listener out of a named pipe.
+// `path` must be of the form of `\\.\pipe\<pipename>`
+// (see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150)
+func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
+	return winio.ListenPipe(path, nil)
+}
diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go
new file mode 100644
index 00000000000..4f03cd6cb0f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/stat_bsd.go
@@ -0,0 +1,44 @@
+// +build darwin freebsd netbsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sys
+
+import (
+	"syscall"
+	"time"
+)
+
+// StatAtime returns the access time from a stat struct
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atimespec
+}
+
+// StatCtime returns the change time from a stat struct
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctimespec
+}
+
+// StatMtime returns the modified time from a stat struct
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtimespec
+}
+
+// StatATimeAsTime returns the access time as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+	return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
+}
diff --git a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go b/vendor/github.com/containerd/containerd/sys/stat_openbsd.go
new file mode 100644
index 00000000000..ec3b9df69aa
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/stat_openbsd.go
@@ -0,0 +1,45 @@
+// +build openbsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sys
+
+import (
+	"syscall"
+	"time"
+)
+
+// StatAtime returns the Atim
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atim
+}
+
+// StatCtime returns the Ctim
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctim
+}
+
+// StatMtime returns the Mtim
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtim
+}
+
+// StatATimeAsTime returns st.Atim as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+	// The int64 conversions ensure the line compiles for 32-bit systems as well.
+ return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert +} diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/sys/stat_unix.go new file mode 100644 index 00000000000..21a666dff86 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/stat_unix.go @@ -0,0 +1,44 @@ +// +build linux solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "syscall" + "time" +) + +// StatAtime returns the Atim +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atim +} + +// StatCtime returns the Ctim +func StatCtime(st *syscall.Stat_t) syscall.Timespec { + return st.Ctim +} + +// StatMtime returns the Mtim +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtim +} + +// StatATimeAsTime returns st.Atim as a time.Time +func StatATimeAsTime(st *syscall.Stat_t) time.Time { + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert +} diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go new file mode 100644 index 00000000000..6e40a9c7d7f --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + _ "unsafe" // required for go:linkname. +) + +//go:linkname beforeFork syscall.runtime_BeforeFork +func beforeFork() + +//go:linkname afterFork syscall.runtime_AfterFork +func afterFork() + +//go:linkname afterForkInChild syscall.runtime_AfterForkInChild +func afterForkInChild() diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s new file mode 100644 index 00000000000..c073fa4ad7c --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s @@ -0,0 +1,15 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ diff --git a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go b/vendor/github.com/containerd/containerd/sys/userns_deprecated.go new file mode 100644 index 00000000000..53acf55477e --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/userns_deprecated.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import "github.com/containerd/containerd/pkg/userns" + +// RunningInUserNS detects whether we are currently running in a user namespace. +// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead. +var RunningInUserNS = userns.RunningInUserNS diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 00000000000..708b2668990 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,628 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. 
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+)
+
+type options struct {
+	chunkSize              int
+	compressionLevel       int
+	prioritizedFiles       []string
+	missedPrioritizedFiles *[]string
+	compression            Compression
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of the eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+	return func(o *options) error {
+		o.chunkSize = chunkSize
+		return nil
+	}
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+	return func(o *options) error {
+		o.compressionLevel = level
+		return nil
+	}
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/".
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+	return func(o *options) error {
+		o.prioritizedFiles = files
+		return nil
+	}
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, this records all missed file names to the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+	return func(o *options) error {
+		if missedFiles == nil {
+			return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+		}
+		o.missedPrioritizedFiles = missedFiles
+		return nil
+	}
+}
+
+// WithCompression specifies the compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+	io.ReadCloser
+	diffID    digest.Digester
+	tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+	return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of the uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+	return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a
+// blob (gzip, zstd or plain tar) passed through the argument. If prioritized
+// files are listed in the options, these files are grouped as "prioritized" and
+// can be used for runtime optimization (e.g. prefetch). This function builds a
+// blob in parallel, dividing the blob into several sub-blobs (at least
+// runtime.GOMAXPROCS(0) of them).
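+//
+// A hedged usage sketch (file names are hypothetical, error handling is
+// abbreviated): build an eStargz blob from a layer tar, prioritizing the
+// files that should be prefetched at runtime.
+//
+//	f, _ := os.Open("layer.tar")
+//	info, _ := f.Stat()
+//	blob, err := Build(
+//		io.NewSectionReader(f, 0, info.Size()),
+//		WithPrioritizedFiles([]string{"bin/app", "etc/config"}),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer blob.Close()
+//	// Upload blob, then record blob.DiffID() and blob.TOCDigest().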
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+	var opts options
+	opts.compressionLevel = gzip.BestCompression // BestCompression by default
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+	if opts.compression == nil {
+		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+	}
+	layerFiles := newTempFiles()
+	defer func() {
+		if rErr != nil {
+			if err := layerFiles.CleanupAll(); err != nil {
+				rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
+			}
+		}
+	}()
+	tarBlob, err := decompressBlob(tarBlob, layerFiles)
+	if err != nil {
+		return nil, err
+	}
+	entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+	if err != nil {
+		return nil, err
+	}
+	tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
+	writers := make([]*Writer, len(tarParts))
+	payloads := make([]*os.File, len(tarParts))
+	var mu sync.Mutex
+	var eg errgroup.Group
+	for i, parts := range tarParts {
+		i, parts := i, parts
+		// builds verifiable stargz sub-blobs
+		eg.Go(func() error {
+			esgzFile, err := layerFiles.TempFile("", "esgzdata")
+			if err != nil {
+				return err
+			}
+			sw := NewWriterWithCompressor(esgzFile, opts.compression)
+			sw.ChunkSize = opts.chunkSize
+			if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+				return err
+			}
+			mu.Lock()
+			writers[i] = sw
+			payloads[i] = esgzFile
+			mu.Unlock()
+			return nil
+		})
+	}
+	if err := eg.Wait(); err != nil {
+		rErr = err
+		return nil, err
+	}
+	tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
+	if err != nil {
+		rErr = err
+		return nil, err
+	}
+	var rs []io.Reader
+	for _, p := range payloads {
+		fs, err := fileSectionReader(p)
+		if err != nil {
+			return nil, err
+		}
+		rs = append(rs, fs)
+	}
+	diffID := digest.Canonical.Digester()
+	pr, pw := io.Pipe()
+	go func() {
+		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+		if err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		defer r.Close()
+		if _, err := io.Copy(diffID.Hash(), r); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		pw.Close()
+	}()
+	return &Blob{
+		ReadCloser: readCloser{
+			Reader:    pr,
+			closeFunc: layerFiles.CleanupAll,
+		},
+		tocDigest: tocDgst,
+		diffID:    diffID,
+	}, nil
+}
+
+// closeWithCombine takes unclosed Writers and closes them. It also returns the
+// TOC that combines all the Writers' TOCs.
+// The Writers don't write the TOC and footer to the underlying writers, so they
+// can be combined into a single eStargz; the tocAndFooter returned by this
+// function can be appended at the tail of that combined blob.
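+//
+// For illustration (hypothetical sizes): if writer A emitted 100 bytes of
+// compressed payload and writer B emitted 50, A's entries keep their offsets,
+// B's non-empty entries are rebased by +100, and the combined TOC and footer
+// returned here are meant to be appended at offset 150.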
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+	if len(ws) == 0 {
+		return nil, "", fmt.Errorf("at least one writer must be passed")
+	}
+	for _, w := range ws {
+		if w.closed {
+			return nil, "", fmt.Errorf("writer must be unclosed")
+		}
+		defer func(w *Writer) { w.closed = true }(w)
+		if err := w.closeGz(); err != nil {
+			return nil, "", err
+		}
+		if err := w.bw.Flush(); err != nil {
+			return nil, "", err
+		}
+	}
+	var (
+		mtoc          = new(JTOC)
+		currentOffset int64
+	)
+	mtoc.Version = ws[0].toc.Version
+	for _, w := range ws {
+		for _, e := range w.toc.Entries {
+			// Recalculate Offset of non-empty files/chunks
+			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+				e.Offset += currentOffset
+			}
+			mtoc.Entries = append(mtoc.Entries, e)
+		}
+		if w.toc.Version > mtoc.Version {
+			mtoc.Version = w.toc.Version
+		}
+		currentOffset += w.cw.n
+	}
+
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+	var estimatedSize int64
+	for _, e := range entries {
+		estimatedSize += e.header.Size
+	}
+	unitSize := estimatedSize / int64(minPartsNum)
+	var (
+		nextEnd = unitSize
+		offset  int64
+	)
+	set = append(set, []*entry{})
+	for _, e := range entries {
+		set[len(set)-1] = append(set[len(set)-1], e)
+		offset += e.header.Size
+		if offset > nextEnd {
+			set = append(set, []*entry{})
+			nextEnd += unitSize
+		}
+	}
+	return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If prioritized files are specified, the list starts with those files,
+// keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+	// Import tar file.
+	intar, err := importTar(in)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to sort")
+	}
+
+	// Sort the tar file respecting the prioritized files list.
+	sorted := &tarFile{}
+	for _, l := range prioritized {
+		if err := moveRec(l, intar, sorted); err != nil {
+			if errors.Is(err, errNotFound) && missedPrioritized != nil {
+				*missedPrioritized = append(*missedPrioritized, l)
+				continue // allow not found
+			}
+			return nil, errors.Wrap(err, "failed to sort tar entries")
+		}
+	}
+	if len(prioritized) == 0 {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     NoPrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	} else {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     PrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	}
+
+	// Dump all entries and concatenate them.
+	return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of a tar archive that contains the
+// entries passed through the arguments.
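+//
+// A hedged consumption sketch (the entries slice "es" is hypothetical): the
+// returned reader is a plain tar stream and can be re-parsed as such.
+//
+//	tr := tar.NewReader(readerFromEntries(es...))
+//	for {
+//		h, err := tr.Next()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		_ = h // inspect the header, copy the payload, etc.
+//	}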
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReader(in) + if err != nil { + return nil, errors.Wrap(err, "failed to make position watcher") + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return nil, errors.Wrap(err, "failed to parse tar file") + } + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return errors.Wrapf(errNotFound, "file: %q", name) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err := 
ioutil.TempFile(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReader(r io.ReaderAt) (*countReader, error) { + pos := int64(0) + return &countReader{r: r, cPos: &pos}, nil +} + +type countReader struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReader) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReader) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReader) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 00000000000..6de78b02dcd --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. 
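+//
+// A hedged usage sketch (the files slice is hypothetical): collect
+// independent cleanup failures and surface them as one error.
+//
+//	var errs []error
+//	for _, f := range files {
+//		if err := f.Close(); err != nil {
+//			errs = append(errs, err)
+//		}
+//	}
+//	return Aggregate(errs) // nil when errs is empty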
+func Aggregate(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		points := make([]string, len(errs)+1)
+		points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+		for i, err := range errs {
+			points[i+1] = fmt.Sprintf("* %s", err)
+		}
+		return errors.New(strings.Join(points, "\n\t"))
+	}
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 00000000000..2c36e89b4ed
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1044 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+	sr        *io.SectionReader
+	toc       *JTOC
+	tocDigest digest.Digest
+
+	// m stores all non-chunk entries, keyed by name.
+	m map[string]*TOCEntry
+
+	// chunks stores all TOCEntry values for regular files that
+	// are split up. For a file with a single chunk, it's only
+	// stored in m.
+	chunks map[string][]*TOCEntry
+
+	decompressor Decompressor
+}
+
+type openOpts struct {
+	tocOffset     int64
+	decompressors []Decompressor
+	telemetry     *Telemetry
+}
+
+// OpenOption is an option used when opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of the TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+	return func(o *openOpts) error {
+		o.tocOffset = tocOffset
+		return nil
+	}
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+	return func(o *openOpts) error {
+		o.decompressors = decompressors
+		return nil
+	}
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+	return func(o *openOpts) error {
+		o.telemetry = telemetry
+		return nil
+	}
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the diff
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these
+// hooks you should be able to record the latency metrics of the respective
+// steps of the estargz open operation. To be used with
+// estargz.Open(..., WithTelemetry(...)).
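+//
+// A hedged wiring sketch (recordMillis is a hypothetical metrics sink): each
+// hook receives the start time of its step, so time.Since(start) yields the
+// latency to record.
+//
+//	tel := &Telemetry{
+//		GetFooterLatency: func(start time.Time) {
+//			recordMillis("get_footer", time.Since(start))
+//		},
+//	}
+//	r, err := Open(sr, WithTelemetry(tel))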
+type Telemetry struct {
+	GetFooterLatency      MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+	GetTocLatency         MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+	DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
+
+// Open opens a stargz file for reading.
+// The behavior is configurable using options.
+//
+// Note that each entry name is normalized as the path that is relative to root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+	var opts openOpts
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+
+	gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+	decompressors := append(gzipCompressors, opts.decompressors...)
+
+	// Determine the size to fetch. Try to fetch as many bytes as possible.
+	fetchSize := maxFooterSize(sr.Size(), decompressors...)
+	if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+		if maybeTocOffset > sr.Size() {
+			return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+		}
+		fetchSize = sr.Size() - maybeTocOffset
+	}
+
+	start := time.Now() // before getting layer footer
+	footer := make([]byte, fetchSize)
+	if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+		return nil, fmt.Errorf("error reading footer: %v", err)
+	}
+	if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+		opts.telemetry.GetFooterLatency(start)
+	}
+
+	var allErr []error
+	var found bool
+	var r *Reader
+	for _, d := range decompressors {
+		fSize := d.FooterSize()
+		fOffset := positive(int64(len(footer)) - fSize)
+		maybeTocBytes := footer[:fOffset]
+		_, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+		if err != nil {
+			allErr = append(allErr, err)
+			continue
+		}
+		if tocSize <= 0 {
+			tocSize = sr.Size() - tocOffset - fSize
+		}
+		if tocSize < int64(len(maybeTocBytes)) {
+			maybeTocBytes = maybeTocBytes[:tocSize]
+		}
+		r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+		if err == nil {
+			found = true
+			break
+		}
+		allErr = append(allErr, err)
+	}
+	if !found {
+		return nil, errorutil.Aggregate(allErr)
+	}
+	if err := r.initFields(); err != nil {
+		return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
+	}
+	return r, nil
+}
+
+// OpenFooter extracts and parses the footer from the given blob.
+// It only supports gzip-based eStargz.
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
+	if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
+		return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
+	}
+	var footer [FooterSize]byte
+	if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
+		return 0, 0, fmt.Errorf("error reading footer: %v", err)
+	}
+	var allErr []error
+	for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
+		fSize := d.FooterSize()
+		fOffset := positive(int64(len(footer)) - fSize)
+		_, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+		if err == nil {
+			return tocOffset, fSize, err
+		}
+		allErr = append(allErr, err)
+	}
+	return 0, 0, errorutil.Aggregate(allErr)
+}
+
+// initFields populates the Reader from r.toc after decoding it from
+// JSON.
+//
+// Unexported fields are populated and TOCEntry fields that were
+// implicit in the JSON are populated.
+func (r *Reader) initFields() error {
+	r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+	r.chunks = make(map[string][]*TOCEntry)
+	var lastPath string
+	uname := map[int]string{}
+	gname := map[int]string{}
+	var lastRegEnt *TOCEntry
+	for _, ent := range r.toc.Entries {
+		ent.Name = cleanEntryName(ent.Name)
+		if ent.Type == "reg" {
+			lastRegEnt = ent
+		}
+		if ent.Type == "chunk" {
+			ent.Name = lastPath
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+			if ent.ChunkSize == 0 && lastRegEnt != nil {
+				ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+			}
+		} else {
+			lastPath = ent.Name
+
+			if ent.Uname != "" {
+				uname[ent.UID] = ent.Uname
+			} else {
+				ent.Uname = uname[ent.UID]
+			}
+			if ent.Gname != "" {
+				gname[ent.GID] = ent.Gname
+			} else {
+				ent.Gname = gname[ent.GID]
+			}
+
+			ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+			if ent.Type == "dir" {
+				ent.NumLink++ // Parent dir links to this directory
+			}
+			r.m[ent.Name] = ent
+		}
+		if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+			r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+		}
+		if ent.ChunkSize == 0 && ent.Size != 0 {
+			ent.ChunkSize = ent.Size
+		}
+	}
+
+	// Populate children, add implicit directories:
+	for _, ent := range r.toc.Entries {
+		if ent.Type == "chunk" {
+			continue
+		}
+		// add "foo/":
+		//   add "foo" child to "" (creating "" if necessary)
+		//
+		// add "foo/bar/":
+		//   add "bar" child to "foo" (creating "foo" if necessary)
+		//
+		// add "foo/bar.txt":
+		//   add "bar.txt" child to "foo" (creating "foo" if necessary)
+		//
+		// add "a/b/c/d/e/f.txt":
+		//   create "a/b/c/d/e" node
+		//   add "f.txt" child to "e"
+
+		name := ent.Name
+		pdirName := parentDir(name)
+		if name == pdirName {
+			// This entry and its parent are the same.
+			// Ignore this to avoid an infinite loop in the references.
+			// The example case where this can occur is when tar contains the root
+			// directory itself (e.g. "./", "/").
+			continue
+		}
+		pdir := r.getOrCreateDir(pdirName)
+		ent.NumLink++ // at least one name (ent.Name) references this entry.
+		if ent.Type == "hardlink" {
+			org, err := r.getSource(ent)
+			if err != nil {
+				return err
+			}
+			org.NumLink++ // original entry is referenced by this ent.Name.
+			ent = org
+		}
+		pdir.addChild(path.Base(name), ent)
+	}
+
+	lastOffset := r.sr.Size()
+	for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+		e := r.toc.Entries[i]
+		if e.isDataType() {
+			e.nextOffset = lastOffset
+		}
+		if e.Offset != 0 {
+			lastOffset = e.Offset
+		}
+	}
+
+	return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+	if ent.Type == "hardlink" {
+		org, ok := r.m[cleanEntryName(ent.LinkName)]
+		if !ok {
+			return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+		}
+		ent, err = r.getSource(org)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ent, nil
+}
+
+func parentDir(p string) string {
+	dir, _ := path.Split(p)
+	return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+	e, ok := r.m[d]
+	if !ok {
+		e = &TOCEntry{
+			Name:    d,
+			Type:    "dir",
+			Mode:    0755,
+			NumLink: 2, // The directory itself (.) and the parent link to this directory.
+		}
+		r.m[d] = e
+		if d != "" {
+			pdir := r.getOrCreateDir(parentDir(d))
+			pdir.addChild(path.Base(d), e)
+		}
+	}
+	return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+	// Verify the digest of TOC JSON
+	if r.tocDigest != tocDigest {
+		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+	}
+	return r.Verifiers()
+}
+
+// Verifiers returns the TOCEntryVerifier of this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+	chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+	regDigestMap := make(map[int64]digest.Digest)   // map from chunk offset to the reg file digest
+	var chunkDigestMapIncomplete bool
+	var regDigestMapIncomplete bool
+	var containsChunk bool
+	for _, e := range r.toc.Entries {
+		if e.Type != "reg" && e.Type != "chunk" {
+			continue
+		}
+
+		// offset must be unique in stargz blob
+		_, dOK := chunkDigestMap[e.Offset]
+		_, rOK := regDigestMap[e.Offset]
+		if dOK || rOK {
+			return nil, fmt.Errorf("offset %d found twice", e.Offset)
+		}
+
+		if e.Type == "reg" {
+			if e.Size == 0 {
+				continue // ignores empty file
+			}
+
+			// record the digest of regular file payload
+			if e.Digest != "" {
+				d, err := digest.Parse(e.Digest)
+				if err != nil {
+					return nil, errors.Wrapf(err,
+						"failed to parse regular file digest %q", e.Digest)
+				}
+				regDigestMap[e.Offset] = d
+			} else {
+				regDigestMapIncomplete = true
+			}
+		} else {
+			containsChunk = true // this layer contains "chunk" entries.
+		}
+
+		// "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+		// chunked file)
+		if e.ChunkDigest != "" {
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, errors.Wrapf(err,
+					"failed to parse chunk digest %q", e.ChunkDigest)
+			}
+			chunkDigestMap[e.Offset] = d
+		} else {
+			chunkDigestMapIncomplete = true
+		}
+	}
+
+	if chunkDigestMapIncomplete {
+		// Though some chunk digests are not found, if this layer doesn't contain
+		// "chunk"s and all digests of "reg" files are recorded, we can use them instead.
+		if !containsChunk && !regDigestMapIncomplete {
+			return &verifier{digestMap: regDigestMap}, nil
+		}
+		return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+	}
+
+	return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+	digestMap   map[int64]digest.Digest
+	digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+	v.digestMapMu.Lock()
+	defer v.digestMapMu.Unlock()
+	d, ok := v.digestMap[ce.Offset]
+	if !ok {
+		return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+			ce.Offset, ce.ChunkSize)
+	}
+	return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one that is relative to root.
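+//
+// An illustrative sketch of walking every chunk of a file (the path is
+// assumed, not part of this package):
+//
+//	for off := int64(0); ; {
+//		ce, ok := r.ChunkEntryForOffset("foo/bar.txt", off)
+//		if !ok {
+//			break
+//		}
+//		// ce.ChunkOffset and ce.ChunkSize locate this chunk within the file.
+//		off = ce.ChunkOffset + ce.ChunkSize
+//	}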
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + fr := &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. 
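+	// The compressed stream isn't seekable, so decompression below starts at
+	// the containing chunk's offset and the first "off" bytes are discarded
+	// before filling p.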
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + } + return io.ReadFull(dr, p) +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse footer") + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. 
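+//
+// A minimal sketch (illustrative only; "f" and "tarStream" are assumed):
+//
+//	w := estargz.NewWriterWithCompressor(f, estargz.NewGzipCompressorWithLevel(gzip.BestSpeed))
+//	if err := w.AppendTar(tarStream); err != nil {
+//		return err
+//	}
+//	tocDigest, err := w.Close()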
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+	bw := bufio.NewWriter(w)
+	cw := &countWriter{w: bw}
+	return &Writer{
+		bw:         bw,
+		cw:         cw,
+		toc:        &JTOC{Version: 1},
+		diffHash:   sha256.New(),
+		compressor: c,
+	}
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+	if w.closed {
+		return "", nil
+	}
+	defer func() { w.closed = true }()
+
+	if err := w.closeGz(); err != nil {
+		return "", err
+	}
+
+	// Write the TOC index and footer.
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+	if err != nil {
+		return "", err
+	}
+	if err := w.bw.Flush(); err != nil {
+		return "", err
+	}
+
+	return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+	if w.closed {
+		return errors.New("write on closed Writer")
+	}
+	if w.gz != nil {
+		if err := w.gz.Close(); err != nil {
+			return err
+		}
+		w.gz = nil
+	}
+	return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+	if name == "" {
+		return ""
+	}
+	if *mp == nil {
+		*mp = make(map[int]string)
+	}
+	if (*mp)[id] == name {
+		return ""
+	}
+	(*mp)[id] = name
+	return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+	if w.gz == nil {
+		w.gz, err = w.compressor.Writer(w.cw)
+	}
+	return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns
+// an error because w cannot overwrite the TOC JSON with the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
+	br := bufio.NewReader(r)
+	if isGzip(br) {
+		zr, _ := gzip.NewReader(br)
+		src = zr
+	} else {
+		src = io.Reader(br)
+	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 + if _, err := dst.Write(remain); err != nil { + return err + } + } + } + break + } + if err != nil { + return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) + } + if cleanEntryName(h.Name) == TOCTarName { + // It is possible for a layer to be "stargzified" twice during the + // distribution lifecycle. So we reserve "TOCTarName" here to avoid + // duplicated entries in the resulting layer. + if lossless { + // We cannot handle this in lossless way. + return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") + } + continue + } + + xattrs := make(map[string][]byte) + const xattrPAXRecordsPrefix = "SCHILY.xattr." + if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. 
+ var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + if err := w.closeGz(); err != nil { + return err + } + + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + ent.Offset = w.cw.n + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + if err := w.condOpenGz(); err != nil { + return err + } + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + var out io.Writer + if tw != nil { + out = tw + } else { + out = dst + } + if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if tw != nil { + if err := tw.Flush(); err != nil { + return err + } + } + } + remainDest := ioutil.Discard + if lossless { + remainDest = dst // Preserve the remaining bytes in lossless mode + } + _, err := io.Copy(remainDest, src) + return err +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. +func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { + for _, d := range decompressors { + if s := d.FooterSize(); res < s && s <= blobSize { + res = s + } + } + return +} + +func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. 
+type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. +func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod new file mode 100644 index 00000000000..b82879fd764 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod @@ -0,0 +1,11 @@ +module github.com/containerd/stargz-snapshotter/estargz + +go 1.16 + +require ( + github.com/klauspost/compress v1.14.2 + github.com/opencontainers/go-digest v1.0.0 + github.com/pkg/errors v0.9.1 + github.com/vbatts/tar-split v0.11.2 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a +) diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum new file mode 100644 index 00000000000..20433e16bf8 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum @@ -0,0 +1,22 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 00000000000..7330849cb89 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,238 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. +func gzipFooterBytes(tocOff int64) []byte { + buf := bytes.NewBuffer(make([]byte, 0, FooterSize)) + gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes + + // Extra header indicating the offset of TOCJSON + // https://tools.ietf.org/html/rfc1952#section-2.3.1.1 + header := make([]byte, 4) + header[0], header[1] = 'S', 'G' + subfield := fmt.Sprintf("%016xSTARGZ", tocOff) + binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 + gz.Header.Extra = append(header, []byte(subfield)...) 
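+	// Closing the empty gzip stream emits a stored (uncompressed) deflate
+	// block plus the 8-byte gzip trailer; together with the fixed-size extra
+	// field above this always yields exactly FooterSize bytes.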
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz extra field size")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	tr, err := decompressTOCEStargz(r)
+	if err != nil {
+		return nil, "", err
+	}
+	dgstr := digest.Canonical.Digester()
+	toc = new(JTOC)
+	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+		return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+	}
+	if err := tr.Close(); err != nil {
+		return nil, "", err
+	}
+	return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	zr, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+	}
+	zr.Multistream(false)
+	tr := tar.NewReader(zr)
+	h, err := tr.Next()
+	if err != nil {
+		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+	}
+	if h.Name != TOCTarName {
+		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+	}
+	return readCloser{tr, zr.Close}, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 00000000000..9224e456dde
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2009 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+	Compression
+	CountStreams(*testing.T, []byte) int
+	DiffIDOf(*testing.T, []byte) string
+	String() string
+}
+
+// CompressionTestSuite tests that this pkg, with the given controllers, can build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingController) {
+	t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+	t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+	t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+}
+
+const (
+	uncompressedType int = iota
+	gzipType
+	zstdType
+)
+
+var srcCompressions = []int{
+	uncompressedType,
+	gzipType,
+	zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as the normal stargz blob.
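+// It exercises every controller against each source compression
+// (uncompressed, gzip, zstd), tar format, and allowed name prefix.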
+func testBuild(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + chunkSize int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+							}
+							if !reflect.DeepEqual(entries, merged) {
+								for _, e := range entries {
+									t.Logf("Original: %v", e.header)
+								}
+								for _, e := range merged {
+									t.Logf("Merged: %v", e.header)
+								}
+								t.Errorf("divided entries couldn't be merged")
+								return
+							}
+
+							// Prepare sample data
+							wantBuf := new(bytes.Buffer)
+							sw := NewWriterWithCompressor(wantBuf, cl)
+							sw.ChunkSize = tt.chunkSize
+							if err := sw.AppendTar(tarBlob); err != nil {
+								t.Fatalf("failed to append tar to want stargz: %v", err)
+							}
+							if _, err := sw.Close(); err != nil {
+								t.Fatalf("failed to prepare want stargz: %v", err)
+							}
+							wantData := wantBuf.Bytes()
+							want, err := Open(io.NewSectionReader(
+								bytes.NewReader(wantData), 0, int64(len(wantData))),
+								WithDecompressors(cl),
+							)
+							if err != nil {
+								t.Fatalf("failed to parse the want stargz: %v", err)
+							}
+
+							// Prepare testing data
+							rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+								WithChunkSize(tt.chunkSize), WithCompression(cl))
+							if err != nil {
+								t.Fatalf("failed to build stargz: %v", err)
+							}
+							defer rc.Close()
+							gotBuf := new(bytes.Buffer)
+							if _, err := io.Copy(gotBuf, rc); err != nil {
+								t.Fatalf("failed to copy built stargz blob: %v", err)
+							}
+							gotData := gotBuf.Bytes()
+							got, err := Open(io.NewSectionReader(
+								bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+								WithDecompressors(cl),
+							)
+							if err != nil {
+								t.Fatalf("failed to parse the got stargz: %v", err)
+							}
+
+							// Check DiffID is properly calculated
+							rc.Close()
+							diffID := rc.DiffID()
+							wantDiffID := cl.DiffIDOf(t, gotData)
+							if diffID.String() != wantDiffID {
+								t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+							}
+
+							// Compare as stargz
+							if !isSameVersion(t, cl, wantData, gotData) {
+								t.Errorf("built stargz doesn't have the same TOC JSON")
+								return
+							}
+							if !isSameEntries(t, want, got) {
+								t.Errorf("built stargz isn't the same as the original")
+								return
+							}
+
+							// Compare as tar.gz
+							if !isSameTarGz(t, cl, wantData, gotData) {
+								t.Errorf("built stargz isn't the same tar.gz")
+								return
+							}
+						})
+					}
+				}
+			}
+		}
+	}
+}
+
+func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
+	aGz, err := controller.Reader(bytes.NewReader(a))
+	if err != nil {
+		t.Fatalf("failed to read A")
+	}
+	defer aGz.Close()
+	bGz, err := controller.Reader(bytes.NewReader(b))
+	if err != nil {
+		t.Fatalf("failed to read B")
+	}
+	defer bGz.Close()
+
+	// Same as tar's Next() method but ignores landmarks and TOCJSON file
+	next := func(r *tar.Reader) (h *tar.Header, err error) {
+		for {
+			if h, err = r.Next(); err != nil {
+				return
+			}
+			if h.Name != PrefetchLandmark &&
+				h.Name != NoPrefetchLandmark &&
+				h.Name != TOCTarName {
+				return
+			}
+		}
+	}
+
+	aTar := tar.NewReader(aGz)
+	bTar := tar.NewReader(bGz)
+	for {
+		// Fetch and parse next header.
+		aH, aErr := next(aTar)
+		bH, bErr := next(bTar)
+		if aErr != nil || bErr != nil {
+			if aErr == io.EOF && bErr == io.EOF {
+				break
+			}
+			t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+		}
+		if !reflect.DeepEqual(aH, bH) {
+			t.Logf("different header (A = %v; B = %v)", aH, bH)
+			return false
+
+		}
+		aFile, err := ioutil.ReadAll(aTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of A")
+		}
+		bFile, err := ioutil.ReadAll(bTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of B")
+		}
+		if !bytes.Equal(aFile, bFile) {
+			t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b))
+			return false
+		}
+	}
+
+	return true
+}
+
+func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool {
+	aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller)
+	if err != nil {
+		t.Fatalf("failed to parse A: %v", err)
+	}
+	bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller)
+	if err != nil {
+		t.Fatalf("failed to parse B: %v", err)
+	}
+	t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+	t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+	return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+	aroot, ok := a.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of A")
+	}
+	broot, ok := b.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of B")
+	}
+	aEntry := stargzEntry{aroot, a}
+	bEntry := stargzEntry{broot, b}
+	return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+	buf := new(bytes.Buffer)
+	var w io.WriteCloser
+	var err error
+	if srcCompression == gzipType {
+		w = gzip.NewWriter(buf)
+	} else if srcCompression == zstdType {
+		w, err = zstd.NewWriter(buf)
+		if err != nil {
+			t.Fatalf("failed to init zstd writer: %v", err)
+		}
+	} else {
+		return src
+	}
+	src.Seek(0, io.SeekStart)
+	if _, err := io.Copy(w, src); err != nil {
+		t.Fatalf("failed to compress source")
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("failed to finalize compress source")
+	}
+	data := buf.Bytes()
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+
+}
+
+type stargzEntry struct {
+	e *TOCEntry
+	r *Reader
+}
+
+// contains checks if all child entries in "a" are also contained in "b".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+	ae, ar := a.e, a.r
+	be, br := b.e, b.r
+	t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+	if !equalEntry(ae, be) {
+		t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+		return false
+	}
+	if ae.Type == "dir" {
+		t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+			allChildrenName(ae), allChildrenName(be))
+		iscontain := true
+		ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+			// Walk through all files on this stargz file.
+
+			if aChild.Name == PrefetchLandmark ||
+				aChild.Name == NoPrefetchLandmark {
+				return true // Ignore landmarks
+			}
+
+			// Ignore a TOCEntry of "./" (formatted as "" by stargz lib) on root directory
+			// because this points to the root directory itself.
+			if aChild.Name == "" && ae.Name == "" {
+				return true
+			}
+
+			bChild, ok := be.LookupChild(aBaseName)
+			if !ok {
+				t.Logf("%q (base: %q): not found in b: %v",
+					ae.Name, aBaseName, allChildrenName(be))
+				iscontain = false
+				return false
+			}
+
+			childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+			if !childcontain {
+				t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+				iscontain = false
+				return false
+			}
+			return true
+		})
+		return iscontain
+	} else if ae.Type == "reg" {
+		af, err := ar.OpenFile(ae.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+		}
+		bf, err := br.OpenFile(be.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+		}
+
+		var nr int64
+		for nr < ae.Size {
+			abytes, anext, aok := readOffset(t, af, nr, a)
+			bbytes, bnext, bok := readOffset(t, bf, nr, b)
+			if !aok && !bok {
+				break
+			} else if !(aok && bok) || anext != bnext {
+				t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+					ae.Name, be.Name, nr, aok, bok, anext, bnext)
+				return false
+			}
+			nr = anext
+			if !bytes.Equal(abytes, bbytes) {
+				t.Logf("%q != %q: different contents %v vs %v",
+					ae.Name, be.Name, string(abytes), string(bbytes))
+				return false
+			}
+		}
+		return true
+	}
+
+	return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+	e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+		children = append(children, baseName)
+		return true
+	})
+	return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+	// Here, we selectively compare fields that we are interested in.
+	return a.Name == b.Name &&
+		a.Type == b.Type &&
+		a.Size == b.Size &&
+		a.ModTime3339 == b.ModTime3339 &&
+		a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+		a.LinkName == b.LinkName &&
+		a.Mode == b.Mode &&
+		a.UID == b.UID &&
+		a.GID == b.GID &&
+		a.Uname == b.Uname &&
+		a.Gname == b.Gname &&
+		(a.Offset > 0) == (b.Offset > 0) &&
+		(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+		a.DevMajor == b.DevMajor &&
+		a.DevMinor == b.DevMinor &&
+		a.NumLink == b.NumLink &&
+		reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+		// chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", 
"bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + symlink("barlink", "test/bar.txt"), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + dir("test2/"), + link("test2/bazlink", "foo.txt"), // modified + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + } + + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl) + } + }) + } + } + } + } + } +} + +// checkStargzTOC checks the TOC JSON of the passed stargz has the expected +// digest and contains valid chunks. It walks all entries in the stargz and +// checks all chunk digests stored to the TOC JSON match the actual contents. +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. 
+ dgstr := digest.Canonical.Digester() + if _, err := io.Copy(dgstr.Hash(), tr); err != nil { + t.Fatalf("failed to calculate digest of TOC JSON: %v", + err) + } + if dgstr.Digest() != tocDigest { + t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest()) + } + continue + } + if _, ok := sgz.Lookup(h.Name); !ok { + t.Errorf("lost stargz entry %q in the converted TOC", h.Name) + return + } + var n int64 + for n < h.Size { + ce, ok := sgz.ChunkEntryForOffset(h.Name, n) + if !ok { + t.Errorf("lost chunk %q(offset=%d) in the converted TOC", + h.Name, n) + return + } + + // Get the original digest to make sure the file contents are kept unchanged + // from the original tar, during the whole conversion steps. + id := chunkID(h.Name, n, ce.ChunkSize) + want, ok := dgstMap[id] + if !ok { + t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v", + h.Name, n, ce.ChunkSize, dgstMap) + return + } + found[id] = true + + // Check the file contents + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil { + t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)", + h.Name, n, ce.ChunkSize) + } + if want != dgstr.Digest() { + t.Errorf("Invalid contents in converted stargz %q: %q; want %q", + h.Name, dgstr.Digest(), want) + return + } + + // Check the digest stored in TOC JSON + dgstTOC, ok := digestMapTOC[ce.Offset] + if !ok { + t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered", + h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset) + } + if want != dgstTOC { + t.Errorf("Invalid digest in TOCEntry %q: %q; want %q", + h.Name, dgstTOC, want) + return + } + + n += ce.ChunkSize + } + } + + for id, ok := range found { + if !ok { + t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) + } + } +} + +// checkVerifyTOC checks the verification works for the TOC JSON of the passed +// stargz. It walks all entries in the stargz and checks the verifications for +// all chunks work. 
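+// It also confirms that every chunk digest recorded in dgstMap is visited.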
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + ev, err := sgz.VerifyTOC(tocDigest) + if err != nil { + t.Errorf("failed to verify stargz: %v", err) + return + } + + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + continue + } + if _, ok := sgz.Lookup(h.Name); !ok { + t.Errorf("lost stargz entry %q in the converted TOC", h.Name) + return + } + var n int64 + for n < h.Size { + ce, ok := sgz.ChunkEntryForOffset(h.Name, n) + if !ok { + t.Errorf("lost chunk %q(offset=%d) in the converted TOC", + h.Name, n) + return + } + + v, err := ev.Verifier(ce) + if err != nil { + t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n) + } + + found[chunkID(h.Name, n, ce.ChunkSize)] = true + + // Check the file contents + if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil { + t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", + h.Name, n, ce.ChunkSize) + } + if !v.Verified() { + t.Errorf("Invalid contents in converted stargz %q (should be succeeded)", + h.Name) + return + } + n += ce.ChunkSize + } + } + + for id, ok := range found { + if !ok { + t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) + } + } +} + +// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be +// detected during the verification and the verification returns an error. 
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during verification and that verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		funcs := map[string]rewriteFunc{
+			"lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var found bool
+				for _, e := range toc.Entries {
+					if cleanEntryName(e.Name) == filename {
+						if e.Type != "reg" && e.Type != "chunk" {
+							t.Fatalf("entry %q to break must be regfile or chunk", filename)
+						}
+						if e.ChunkDigest == "" {
+							t.Fatalf("entry %q is already invalid", filename)
+						}
+						e.ChunkDigest = ""
+						found = true
+					}
+				}
+				if !found {
+					t.Fatalf("rewrite target not found")
+				}
+			},
+			"duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var (
+					sampleEntry *TOCEntry
+					targetEntry *TOCEntry
+				)
+				for _, e := range toc.Entries {
+					if e.Type == "reg" || e.Type == "chunk" {
+						if cleanEntryName(e.Name) == filename {
+							targetEntry = e
+						} else {
+							sampleEntry = e
+						}
+					}
+				}
+				if sampleEntry == nil {
+					t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+				}
+				if targetEntry == nil {
+					t.Fatalf("rewrite target not found")
+				}
+				targetEntry.Offset = sampleEntry.Offset
+			},
+		}
+
+		for name, rFunc := range funcs {
+			t.Run(name, func(t *testing.T) {
+				newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+				buf := new(bytes.Buffer)
+				if _, err := io.Copy(buf, newSgz); err != nil {
+					t.Fatalf("failed to get converted stargz: %v", err)
+				}
+				isgz := buf.Bytes()
+
+				sgz, err := Open(
+					io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+					WithDecompressors(controller),
+				)
+				if err != nil {
+					t.Fatalf("failed to parse converted stargz: %v", err)
+					return
+				}
+				_, err = sgz.VerifyTOC(newTocDigest)
+				if err == nil {
+					t.Errorf("must fail for invalid TOC")
+					return
+				}
+			})
+		}
+	}
+}
+
+// checkVerifyInvalidStargzFail checks that verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
+		if err != nil {
+			t.Fatalf("failed to convert stargz: %v", err)
+		}
+		defer rc.Close()
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, rc); err != nil {
+			t.Fatalf("failed to copy built stargz blob: %v", err)
+		}
+		mStargz := buf.Bytes()
+
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		_, err = sgz.VerifyTOC(tocDigest)
+		if err == nil {
+			t.Errorf("must fail for invalid TOC")
+			return
+		}
+	}
+}
+
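The rewrite-and-reparse helpers below are easier to follow with the on-disk layout in mind. Per the TOCTarName and FooterSize definitions later in this patch, an eStargz blob is laid out roughly as:

    [compressed tar entries ...][TOC: a gzip stream holding stargz.index.json][footer: FooterSize bytes]

The footer is fixed-size and sits at the very end, so parseStargz reads it first to learn tocOffset, then decodes the TOC from the range [tocOffset, blobSize-FooterSize). rewriteTOCJSON exploits the same layout in reverse: it keeps the original bytes up to tocOffset and appends a regenerated TOC and footer, which is how the tampered blobs for these tests are produced.
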
+// checkVerifyBrokenContentFail checks that the verifier detects broken content
+// that doesn't match the expected digest and returns an error.
+func checkVerifyBrokenContentFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		// Parse stargz file
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		ev, err := sgz.VerifyTOC(tocDigest)
+		if err != nil {
+			t.Fatalf("failed to verify stargz: %v", err)
+			return
+		}
+
+		// Open the target file
+		sr, err := sgz.OpenFile(filename)
+		if err != nil {
+			t.Fatalf("failed to open file %q: %v", filename, err)
+		}
+		ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+		if !ok {
+			t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+			return
+		}
+		if ce.ChunkSize == 0 {
+			t.Fatalf("file mustn't be empty")
+			return
+		}
+		data := make([]byte, ce.ChunkSize)
+		if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+			t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+				filename, ce.ChunkOffset)
+		}
+
+		// Check the broken chunk (must fail)
+		v, err := ev.Verifier(ce)
+		if err != nil {
+			t.Fatalf("failed to get verifier for %q", filename)
+		}
+		broken := append([]byte{^data[0]}, data[1:]...)
+		if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+			t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+				filename, ce.ChunkOffset, ce.ChunkSize)
+		}
+		if v.Verified() {
+			t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+				filename, data, broken)
+		}
+	}
+}
+
+func chunkID(name string, offset, size int64) string {
+	return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+	decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+	if err != nil {
+		t.Fatalf("failed to extract TOC JSON: %v", err)
+	}
+
+	rewrite(t, decodedJTOC, sgz)
+
+	tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+	if err != nil {
+		t.Fatalf("failed to create toc and footer: %v", err)
+	}
+
+	// Reconstruct stargz file with the modified TOC JSON
+	if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+		t.Fatalf("failed to reset the seek position of stargz: %v", err)
+	}
+	return io.MultiReader(
+		io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+		tocFooter,                       // Rewritten TOC and footer
+	), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+	decodedJTOC, _, err := parseStargz(sgz, controller)
+	if err != nil {
+		return nil, err
+	}
+	digestMap := make(map[int64]digest.Digest)
+	for _, e := range decodedJTOC.Entries {
+		if e.Type == "reg" || e.Type == "chunk" {
+			if e.Type == "reg" && e.Size == 0 {
+				continue // ignores empty file
+			}
+			if e.ChunkDigest == "" {
+				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+					e.Name, e.Offset)
+			}
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, err
+			}
+			digestMap[e.Offset] = d
+		}
+	}
+	return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+	fSize := controller.FooterSize()
+	footer := make([]byte, fSize)
+	if _, err := sgz.ReadAt(footer,
sgz.Size()-fSize); err != nil { + return nil, 0, errors.Wrap(err, "error reading footer") + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, errors.Wrapf(err, "failed to parse footer") + } + + // Decode the TOC JSON + tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, errors.Wrap(err, "failed to parse TOC") + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingController) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + tests := []struct { + name string + chunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // empty tar + TOC + footer + wantNumGzLossLess: 3, // empty tar + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is such a 
big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + file(TOCTarName, "dummy"), // ignored by the writer. 
(lossless write returns error) + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + wantFailOnLossLess: true, + }, + } + + for _, tt := range tests { + for _, cl := range controllers { + cl := cl + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, lossless := range []bool{true, false} { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) { + var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) + origTarDgstr := digest.Canonical.Digester() + tr = io.TeeReader(tr, origTarDgstr.Hash()) + var stargzBuf bytes.Buffer + w := NewWriterWithCompressor(&stargzBuf, cl) + w.ChunkSize = tt.chunkSize + if lossless { + err := w.AppendTarLossLess(tr) + if tt.wantFailOnLossLess { + if err != nil { + return // expected to fail + } + t.Fatalf("Append wanted to fail on lossless") + } + if err != nil { + t.Fatalf("Append(lossless): %v", err) + } + } else { + if err := w.AppendTar(tr); err != nil { + t.Fatalf("Append: %v", err) + } + } + if _, err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + b := stargzBuf.Bytes() + + if lossless { + // Check if the result blob reserves original tar metadata + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl) + if err != nil { + t.Errorf("failed to decompress blob: %v", err) + return + } + defer rc.Close() + resultDgstr := digest.Canonical.Digester() + if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil { + t.Errorf("failed to read result decompressed blob: %v", err) + return + } + if resultDgstr.Digest() != origTarDgstr.Digest() { + t.Errorf("lossy compression occurred: digest=%v; want %v", + resultDgstr.Digest(), origTarDgstr.Digest()) + return + } + } + + diffID := w.DiffID() + wantDiffID := cl.DiffIDOf(t, b) + if diffID != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + got := cl.CountStreams(t, b) + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = tt.wantNumGzLossLess + } + if got != wantNumGz { + t.Errorf("number of streams = %d; want %d", got, wantNumGz) + } + + telemetry, checkCalled := newCalledTelemetry() + r, err := Open( + io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), + WithDecompressors(cl), + WithTelemetry(telemetry), + ) + if err != nil { + t.Fatalf("stargz.Open: %v", err) + } + if err := checkCalled(); err != nil { + t.Errorf("telemetry failure: %v", err) + } + for _, want := range tt.want { + want.check(t, r) + } + }) + } + } + } + } + } +} + +func newCalledTelemetry() (telemetry *Telemetry, check func() error) { + var getFooterLatencyCalled bool + var getTocLatencyCalled bool + var deserializeTocLatencyCalled bool + return &Telemetry{ + func(time.Time) { getFooterLatencyCalled = true }, + func(time.Time) { getTocLatencyCalled = true }, + func(time.Time) { deserializeTocLatencyCalled = true }, + }, func() error { + var allErr []error + if !getFooterLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) 
+ } + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) 
{ + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) + } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is 
%q; want %q", file, ent.LinkName, target)
+			}
+			return
+		}
+	}
+	t.Errorf("symlink %q not found", file)
+	})
+}
+
+func lookupMatch(name string, want *TOCEntry) stargzCheck {
+	return stargzCheckFn(func(t *testing.T, r *Reader) {
+		e, ok := r.Lookup(name)
+		if !ok {
+			t.Fatalf("failed to Lookup entry %q", name)
+		}
+		if !reflect.DeepEqual(e, want) {
+			t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want)
+		}
+	})
+}
+
+func hasEntryOwner(entry string, owner owner) stargzCheck {
+	return stargzCheckFn(func(t *testing.T, r *Reader) {
+		ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
+		if !ok {
+			t.Errorf("entry %q not found", entry)
+			return
+		}
+		if ent.UID != owner.uid || ent.GID != owner.gid {
+			t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+			return
+		}
+	})
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+	appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+	return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+	format := tar.FormatUnknown
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case tar.Format:
+			format = v
+		default:
+			panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+		}
+	}
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, ent := range ents {
+		if err := ent.appendTar(tw, prefix, format); err != nil {
+			t.Fatalf("building input tar: %v", err)
+		}
+	}
+	if err := tw.Close(); err != nil {
+		t.Errorf("closing write of input tar: %v", err)
+	}
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless conversion preserves them
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+		var o owner
+		mode := os.FileMode(0755)
+		for _, opt := range opts {
+			switch v := opt.(type) {
+			case owner:
+				o = v
+			case os.FileMode:
+				mode = v
+			default:
+				return errors.New("unsupported opt")
+			}
+		}
+		if !strings.HasSuffix(name, "/") {
+			panic(fmt.Sprintf("missing trailing slash in dir %q", name))
+		}
+		tm, err := fileModeToTarMode(mode)
+		if err != nil {
+			return err
+		}
+		return tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeDir,
+			Name:     prefix + name,
+			Mode:     tm,
+			Uid:      o.uid,
+			Gid:      o.gid,
+			Format:   format,
+		})
+	})
+}
+
+// xAttr holds extended attributes to set on test files created with the file helper.
+type xAttr map[string]string
+
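Taken together, these constructors form a small DSL for tar fixtures. A hedged sketch of how they compose, in the same package; the helpers (buildTar, tarOf, dir, file, symlink, owner, xAttr) are the real ones above and below, while TestBuildTarSketch and the entry names are invented for this example:

    func TestBuildTarSketch(t *testing.T) {
        sr := buildTar(t, tarOf(
            dir("foo/", owner{uid: 50, gid: 100}),              // directory with an explicit owner
            file("foo/bar.txt", "hello", xAttr{"user.k": "v"}), // regular file; xattrs force PAX format
            symlink("foo/link", "bar.txt"),                     // symlink entry
        ), "", tar.FormatPAX)
        tr := tar.NewReader(sr)
        for {
            h, err := tr.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                t.Fatal(err)
            }
            t.Logf("entry: %s (%d bytes)", h.Name, h.Size)
        }
    }

+// owner is the owner to set on test files and directories with the file and dir functions.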
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 00000000000..384ff7fd7f2 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,316 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. 
+ // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. +type JTOC struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. It is the complete path + // stored in the tar file, not just the base name. + Name string `json:"name"` + + // Type is one of "dir", "reg", "symlink", "hardlink", "char", + // "block", "fifo", or "chunk". + // The "chunk" type is used for regular file data chunks past the first + // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset, + // ChunkOffset, and ChunkSize populated. + Type string `json:"type"` + + // Size, for regular files, is the logical size of the file. + Size int64 `json:"size,omitempty"` + + // ModTime3339 is the modification time of the tar entry. Empty + // means zero or unknown. Otherwise it's in UTC RFC3339 + // format. Use the ModTime method to access the time.Time value. + ModTime3339 string `json:"modtime,omitempty"` + modTime time.Time + + // LinkName, for symlinks and hardlinks, is the link target. + LinkName string `json:"linkName,omitempty"` + + // Mode is the permission and mode bits. + Mode int64 `json:"mode,omitempty"` + + // UID is the user ID of the owner. + UID int `json:"uid,omitempty"` + + // GID is the group ID of the owner. + GID int `json:"gid,omitempty"` + + // Uname is the username of the owner. 
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	NumLink int
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for the regular file's payload.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
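To make the chunking fields concrete, here is a sketch of how a 10 KiB file split into two chunks would appear among JTOC.Entries; the file name, offsets, and digest strings are illustrative placeholders, not real values:

    var exampleEntries = []*TOCEntry{
        {
            Name: "foo/big.txt", Type: "reg", Size: 10240,
            Offset: 512, ChunkOffset: 0, ChunkSize: 4096,
            ChunkDigest: "sha256:aaaa...", // placeholder digest of uncompressed bytes [0,4096)
        },
        {
            Name: "foo/big.txt", Type: "chunk",
            Offset: 2048, ChunkOffset: 4096, ChunkSize: 6144,
            ChunkDigest: "sha256:bbbb...", // placeholder digest of uncompressed bytes [4096,10240)
        },
    }

Offset locates each chunk's compressed stream inside the blob, while ChunkOffset and ChunkSize address the chunk within the uncompressed file; only the first ("reg") entry carries the full file metadata.
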
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64        { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{}   { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode so we can understand these bits using the `tar` pkg.
+	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+	switch fi.e.Type {
+	case "dir":
+		m |= os.ModeDir
+	case "symlink":
+		m |= os.ModeSymlink
+	case "char":
+		m |= os.ModeDevice | os.ModeCharDevice
+	case "block":
+		m |= os.ModeDevice
+	case "fifo":
+		m |= os.ModeNamedPipe
+	}
+	return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+	// Verifier provides a content verifier that can be used for verifying the
+	// contents of the specified TOCEntry.
+	Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	Writer(w io.Writer) (io.WriteCloser, error)
+
+	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+	// blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+	// the top of the blob until the TOC JSON).
+	//
+	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
+	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
+	// of the underlying blob that has the range specified by ParseFooter method.
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob.
This must match to the value returned from + // Compressor.WriteTOCAndFooter that is used when creating this blob. + ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) +} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 00000000000..0d82a2dd3c6 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,679 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +// Note this is the actual implementation of the CNI specification, which +// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// it is typically bundled into runtime providers (i.e. containerd or cri-o would use this +// before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, +// to add an IP to a container, to parse the configuration of the CNI and so on. + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/utils" + "github.com/containernetworking/cni/pkg/version" +) + +var ( + CacheDir = "/var/lib/cni" +) + +const ( + CNICacheV1 = "cniCacheV1" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. +type RuntimeConf struct { + ContainerID string + NetNS string + IfName string + Args [][2]string + // A dictionary of capability-specific data passed by the runtime + // to plugins as top-level keys in the 'runtimeConfig' dictionary + // of the plugin's stdin data. libcni will ensure that only keys + // in this map which match the capabilities of the plugin are passed + // to the plugin + CapabilityArgs map[string]interface{} + + // DEPRECATED. Will be removed in a future release. 
+ CacheDir string +} + +type NetworkConfig struct { + Network *types.NetConf + Bytes []byte +} + +type NetworkConfigList struct { + Name string + CNIVersion string + DisableCheck bool + Plugins []*NetworkConfig + Bytes []byte +} + +type CNI interface { + AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) + ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) +} + +type CNIConfig struct { + Path []string + exec invoke.Exec + cacheDir string +} + +// CNIConfig implements the CNI interface +var _ CNI = &CNIConfig{} + +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return NewCNIConfigWithCacheDir(path, "", exec) +} + +// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins +// in the given paths use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +// The given cache directory will be used for temporary data storage when needed. +func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + cacheDir: cacheDir, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + inject := map[string]interface{}{ + "name": name, + "cniVersion": cniVersion, + } + // Add previous plugin result + if prevResult != nil { + inject["prevResult"] = prevResult + } + + // Ensure every config uses the same name and version + orig, err = InjectConf(orig, inject) + if err != nil { + return nil, err + } + + return injectRuntimeConfig(orig, rt) +} + +// This function takes a libcni RuntimeConf structure and injects values into +// a "runtimeConfig" dictionary in the CNI network configuration JSON that +// will be passed to the plugin on stdin. +// +// Only "capabilities arguments" passed by the runtime are currently injected. +// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. 
For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. +func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + + return ioutil.WriteFile(fname, newBytes, 0600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, 
err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, &newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. 
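+//
+// A minimal usage sketch (hypothetical paths and IDs, for illustration only;
+// netConf would come from e.g. ConfFromFile):
+//
+//	cni := NewCNIConfig([]string{"/opt/cni/bin"}, nil)
+//	rt := &RuntimeConf{ContainerID: "ctr1", NetNS: "/var/run/netns/ctr1", IfName: "eth0"}
+//	cachedResult, err := cni.GetNetworkCachedResult(netConf, rt)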
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + 
return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, 
net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = c.cacheDel(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. +// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. +// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. +func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 00000000000..d28135ff3cc --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,268 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes} + if err := json.Unmarshal(bytes, &conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error 
reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. +func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. 
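+	//
+	// Illustration (shape only): {"cniVersion": "0.4.0", "name": "mynet", "type": "bridge"}
+	// becomes {"cniVersion": "0.4.0", "name": "mynet", "plugins": [<the original config>]}.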
+ + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 00000000000..3cdb4bc8dad --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (*inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. + env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. 
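+// For example (illustrative): dedupEnv([]string{"A=1", "B=2", "A=3"}) yields the
+// entries "A=3" and "B=2"; because the result is rebuilt from a map, the order
+// of the returned slice is unspecified.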
+func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 00000000000..8defe4dd398 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + return &DelegateArgs{ + Command: action, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 
00000000000..e79bffe63eb --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,138 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "fmt" + "os" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/version" +) + +// Exec is an interface encapsulates all operations that deal with finding +// and executing a CNI plugin. Tests may provide a fake implementation +// to avoid writing fake plugins to temporary directories during the test. +type Exec interface { + ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) + FindInPath(plugin string, paths []string) (string, error) + Decode(jsonBytes []byte) (version.PluginInfo, error) +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + return create.CreateFromBytes(stdoutBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. 
For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 00000000000..e62029eb788 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,48 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if strings.ContainsRune(plugin, os.PathSeparator) { + return "", fmt.Errorf("invalid plugin name: %s", plugin) + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 00000000000..9bcfb455367 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 00000000000..7665125b133 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 00000000000..5ab5cc88576 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,88 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = stderr + + // Retry the command on "text file busy" errors + for i := 0; i <= 5; i++ { + err := c.Run() + + // Command succeeded + if err == nil { + break + } + + // If the plugin is currently about to be written, then we wait a + // second and try it again + if strings.Contains(err.Error(), "text file busy") { + time.Sleep(time.Second) + continue + } + + // All other errors except than the busy text file + return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes()) + } + + // Copy stderr to caller's buffer in case plugin printed to both + // stdout and stderr for some reason. Ignore failures as stderr is + // only informational. + if e.Stderr != nil && stderr.Len() > 0 { + _, _ = stderr.WriteTo(e.Stderr) + } + return stdout.Bytes(), nil +} + +func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error { + emsg := types.Error{} + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err) + } else { + emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr)) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr) + } + return &emsg +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 00000000000..99b151ff240 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,189 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.2.0" + +var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010) + convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +// NewResult creates a new Result object from JSON data. The JSON data +// must be compatible with the CNI versions implemented by this type. +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + if result.CNIVersion == "" { + result.CNIVersion = "0.1.0" + } + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +// GetResult converts the given Result object to the ImplementedSpecVersion +// and returns the concrete type or an error +func GetResult(r types.Result) (*Result, error) { + result020, err := convert.Convert(r, ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func convertFrom010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.2.0" { + panic("only converts to version 0.2.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: ImplementedSpecVersion, + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +func convertTo010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.1.0" { + panic("only converts to version 0.1.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: "0.1.0", + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + var routes 
[]types.Route + for _, fromRoute := range i.Routes { + routes = append(routes, *fromRoute.Copy()) + } + return &IPConfig{ + IP: i.IP, + Gateway: i.Gateway, + Routes: routes, + } +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route `json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go new file mode 100644 index 00000000000..3633b0eaa3a --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go @@ -0,0 +1,306 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types040 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types020 "github.com/containernetworking/cni/pkg/types/020" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.4.0" + +var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertInternal) + convert.RegisterConverter("0.3.1", supportedVersions, convertInternal) + + // Down-converters + convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal) + convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig { + return &IPConfig{ + Version: ipVersion, + Address: from.IP, + Gateway: from.Gateway, + } +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types020.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + if fromResult.IP4 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4")) + for _, fromRoute := range fromResult.IP4.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + if fromResult.IP6 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6")) + for _, fromRoute := range fromResult.IP6.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + return toResult, nil +} + +func convertInternal(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: 
[]*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy()) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, fromIPC.Copy()) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types020.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + } + + for _, fromIP := range fromResult.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if fromIP.Version == "4" && toResult.IP4 == nil { + toResult.IP4 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } else if fromIP.Version == "6" && toResult.IP6 == nil { + toResult.IP6 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } + if toResult.IP4 != nil && toResult.IP6 != nil { + break + } + } + + for _, fromRoute := range fromResult.Routes { + is4 := fromRoute.Dst.IP.To4() != nil + if is4 && toResult.IP4 != nil { + toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } else if !is4 && toResult.IP6 != nil { + toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } + } + + // 0.2.0 and earlier require at least one IP address in the Result + if toResult.IP4 == nil && toResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return toResult, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
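+// For example (illustrative): ipConf.Interface = Int(0) associates the address
+// with Result.Interfaces[0].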
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Version: i.Version, + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go new file mode 100644 index 00000000000..0e1e8b857b7 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -0,0 +1,307 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
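Before the 1.0.0 types below, a minimal sketch (not part of the vendored files) of how a caller might exercise the 0.4.0 result type and the down-converters registered above; the JSON literal and its values are illustrative only:

```go
package main

import (
	"fmt"

	types040 "github.com/containernetworking/cni/pkg/types/040"
)

func main() {
	// An illustrative 0.4.0 plugin result with a single IPv4 address.
	raw := []byte(`{
		"cniVersion": "0.4.0",
		"ips": [{"version": "4", "address": "10.1.2.3/24"}]
	}`)

	r, err := types040.NewResult(raw)
	if err != nil {
		panic(err)
	}

	// Down-convert through the registered 0.4.0 -> 0.2.0 converter;
	// 0.2.0 keeps only the first IP of each address family.
	old, err := r.GetAsVersion("0.2.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(old.Version()) // prints "0.2.0"
}
```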
+ +package types100 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "1.0.0" + +var supportedVersions = []string{ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + + // Down-converters + convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + result040, err := convert.Convert(from, "0.4.0") + if err != nil { + return nil, err + } + result100, err := convertFrom04x(result040, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return result100, nil +} + +func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig { + to := &IPConfig{ + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceFrom040(from *types040.Interface) *Interface { + return &Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertFrom04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types040.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC)) + } + for 
_, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertIPConfigTo040(from *IPConfig) *types040.IPConfig { + version := "6" + if from.Address.IP.To4() != nil { + version = "4" + } + to := &types040.IPConfig{ + Version: version, + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceTo040(from *Interface) *types040.Interface { + return &types040.Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertTo04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types040.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + // First convert to 0.4.0 + result040, err := convertTo04x(from, "0.4.0") + if err != nil { + return nil, err + } + result02x, err := convert.Convert(result040, toVersion) + if err != nil { + return nil, err + } + return result02x, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 00000000000..7516f03ef58 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,122 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Returns the string
+func (s *UnmarshallableString) UnmarshalText(data []byte) error {
+	*s = UnmarshallableString(data)
+	return nil
+}
+
+// CommonArgs contains the IgnoreUnknown argument
+// and must be embedded by all Arg structs
+type CommonArgs struct {
+	IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"`
+}
+
+// GetKeyField is a helper function to retrieve the field named keyString
+// from a Value that represents a pointer to a struct
+func GetKeyField(keyString string, v reflect.Value) reflect.Value {
+	return v.Elem().FieldByName(keyString)
+}
+
+// UnmarshalableArgsError is used to indicate an error unmarshalling args
+// from the args-string in the form "K=V;K2=V2;..."
+type UnmarshalableArgsError struct {
+	error
+}
+
+// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
+func LoadArgs(args string, container interface{}) error {
+	if args == "" {
+		return nil
+	}
+
+	containerValue := reflect.ValueOf(container)
+
+	pairs := strings.Split(args, ";")
+	unknownArgs := []string{}
+	for _, pair := range pairs {
+		kv := strings.Split(pair, "=")
+		if len(kv) != 2 {
+			return fmt.Errorf("ARGS: invalid pair %q", pair)
+		}
+		keyString := kv[0]
+		valueString := kv[1]
+		keyField := GetKeyField(keyString, containerValue)
+		if !keyField.IsValid() {
+			unknownArgs = append(unknownArgs, pair)
+			continue
+		}
+
+		var keyFieldInterface interface{}
+		switch {
+		case keyField.Kind() == reflect.Ptr:
+			keyField.Set(reflect.New(keyField.Type().Elem()))
+			keyFieldInterface = keyField.Interface()
+		case keyField.CanAddr() && keyField.Addr().CanInterface():
+			keyFieldInterface = keyField.Addr().Interface()
+		default:
+			return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)}
+		}
+		u, ok := keyFieldInterface.(encoding.TextUnmarshaler)
+		if !ok {
+			return UnmarshalableArgsError{fmt.Errorf(
+				"ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler",
+				keyString, reflect.TypeOf(keyFieldInterface))}
+		}
+		err := u.UnmarshalText([]byte(valueString))
+		if err != nil {
+			return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err)
+		}
+	}
+
+	isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool()
+	if len(unknownArgs) > 0 && !isIgnoreUnknown {
+		return fmt.Errorf("ARGS: unknown args %q", unknownArgs)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
new file mode 100644
index 00000000000..ed28b33e8e1
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go
@@ -0,0 +1,56 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
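As a usage note for the args parser shown above, a minimal sketch (not part of the vendored files); the `EnvArgs` struct here is a hypothetical caller-side type:

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/types"
)

// EnvArgs is a hypothetical CNI_ARGS container; embedding CommonArgs
// makes the IgnoreUnknown key available to LoadArgs.
type EnvArgs struct {
	types.CommonArgs
	IP types.UnmarshallableString
}

func main() {
	var args EnvArgs
	// FOO has no matching field but is tolerated because IgnoreUnknown=1.
	if err := types.LoadArgs("IgnoreUnknown=1;IP=10.1.2.3;FOO=bar", &args); err != nil {
		panic(err)
	}
	fmt.Println(string(args.IP)) // prints "10.1.2.3"
}
```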
+ +package create + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +// DecodeVersion returns the CNI version from CNI configuration or result JSON, +// or an error if the operation could not be performed. +func DecodeVersion(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %w", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} + +// Create creates a CNI Result using the given JSON with the expected +// version, or an error if the creation could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + return convert.Create(version, bytes) +} + +// CreateFromBytes creates a CNI Result from the given JSON, automatically +// detecting the CNI spec version of the result. An error is returned if the +// operation could not be performed. +func CreateFromBytes(bytes []byte) (types.Result, error) { + version, err := DecodeVersion(bytes) + if err != nil { + return nil, err + } + return convert.Create(version, bytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go new file mode 100644 index 00000000000..bdbe4b0a59a --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go @@ -0,0 +1,92 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +// ConvertFn should convert from the given arbitrary Result type into a +// Result implementing CNI specification version passed in toVersion. +// The function is guaranteed to be passed a Result type matching the +// fromVersion it was registered with, and is guaranteed to be +// passed a toVersion matching one of the toVersions it was registered with. 
+type ConvertFn func(from types.Result, toVersion string) (types.Result, error) + +type converter struct { + // fromVersion is the CNI Result spec version that convertFn accepts + fromVersion string + // toVersions is a list of versions that convertFn can convert to + toVersions []string + convertFn ConvertFn +} + +var converters []*converter + +func findConverter(fromVersion, toVersion string) *converter { + for _, c := range converters { + if c.fromVersion == fromVersion { + for _, v := range c.toVersions { + if v == toVersion { + return c + } + } + } + } + return nil +} + +// Convert converts a CNI Result to the requested CNI specification version, +// or returns an error if the conversion could not be performed or failed +func Convert(from types.Result, toVersion string) (types.Result, error) { + if toVersion == "" { + toVersion = "0.1.0" + } + + fromVersion := from.Version() + + // Shortcut for same version + if fromVersion == toVersion { + return from, nil + } + + // Otherwise find the right converter + c := findConverter(fromVersion, toVersion) + if c == nil { + return nil, fmt.Errorf("no converter for CNI result version %s to %s", + fromVersion, toVersion) + } + return c.convertFn(from, toVersion) +} + +// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) { + // Make sure there is no converter already registered for these + // from and to versions + for _, v := range toVersions { + if findConverter(fromVersion, v) != nil { + panic(fmt.Sprintf("converter already registered for %s to %s", + fromVersion, v)) + } + } + converters = append(converters, &converter{ + fromVersion: fromVersion, + toVersions: toVersions, + convertFn: convertFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go new file mode 100644 index 00000000000..96363091259 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go @@ -0,0 +1,66 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
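To connect the public create helpers shown earlier with this registry, a minimal sketch (not part of the vendored files) of decoding a result of unknown version; the JSON is illustrative, and the blank import triggers creator registration:

```go
package main

import (
	"fmt"

	_ "github.com/containernetworking/cni/pkg/types/100" // registers the 1.0.0 creator via init()
	"github.com/containernetworking/cni/pkg/types/create"
)

func main() {
	raw := []byte(`{"cniVersion": "1.0.0", "ips": [{"address": "10.1.2.3/24"}]}`)

	// DecodeVersion only peeks at the cniVersion field.
	ver, err := create.DecodeVersion(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(ver) // prints "1.0.0"

	// CreateFromBytes dispatches to the creator registered for that version.
	res, err := create.CreateFromBytes(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Version()) // prints "1.0.0"
}
```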
+
+package convert
+
+import (
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+type ResultFactoryFunc func([]byte) (types.Result, error)
+
+type creator struct {
+	// CNI Result spec versions that createFn can create a Result for
+	versions []string
+	createFn ResultFactoryFunc
+}
+
+var creators []*creator
+
+func findCreator(version string) *creator {
+	for _, c := range creators {
+		for _, v := range c.versions {
+			if v == version {
+				return c
+			}
+		}
+	}
+	return nil
+}
+
+// Create creates a CNI Result using the given JSON, or an error if the creation
+// could not be performed
+func Create(version string, bytes []byte) (types.Result, error) {
+	if c := findCreator(version); c != nil {
+		return c.createFn(bytes)
+	}
+	return nil, fmt.Errorf("unsupported CNI result version %q", version)
+}
+
+// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED
+// EXCEPT FROM CNI ITSELF.
+func RegisterCreator(versions []string, createFn ResultFactoryFunc) {
+	// Make sure there is no creator already registered for these versions
+	for _, v := range versions {
+		if findCreator(v) != nil {
+			panic(fmt.Sprintf("creator already registered for %s", v))
+		}
+	}
+	creators = append(creators, &creator{
+		versions: versions,
+		createFn: createFn,
+	})
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
new file mode 100644
index 00000000000..fba17dfc0f3
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -0,0 +1,234 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+)
+
+// IPNet is like net.IPNet but adds JSON marshalling and unmarshalling
+type IPNet net.IPNet
+
+// ParseCIDR takes a string like "10.2.3.1/24" and
+// returns a *net.IPNet containing "10.2.3.1" and a /24 mask
+func ParseCIDR(s string) (*net.IPNet, error) {
+	ip, ipn, err := net.ParseCIDR(s)
+	if err != nil {
+		return nil, err
+	}
+
+	ipn.IP = ip
+	return ipn, nil
+}
+
+func (n IPNet) MarshalJSON() ([]byte, error) {
+	return json.Marshal((*net.IPNet)(&n).String())
+}
+
+func (n *IPNet) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+
+	tmp, err := ParseCIDR(s)
+	if err != nil {
+		return err
+	}
+
+	*n = IPNet(*tmp)
+	return nil
+}
+
+// NetConf describes a network.
+type NetConf struct {
+	CNIVersion string `json:"cniVersion,omitempty"`
+
+	Name         string          `json:"name,omitempty"`
+	Type         string          `json:"type,omitempty"`
+	Capabilities map[string]bool `json:"capabilities,omitempty"`
+	IPAM         IPAM            `json:"ipam,omitempty"`
+	DNS          DNS             `json:"dns"`
+
+	RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
+	PrevResult    Result                 `json:"-"`
+}
+
+type IPAM struct {
+	Type string `json:"type,omitempty"`
+}
+
+// NetConfList describes an ordered list of networks.
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +func (d *DNS) Copy() *DNS { + if d == nil { + return nil + } + + to := &DNS{Domain: d.Domain} + for _, ns := range d.Nameservers { + to.Nameservers = append(to.Nameservers, ns) + } + for _, s := range d.Search { + to.Search = append(to.Search, s) + } + for _, o := range d.Options { + to.Options = append(to.Options, o) + } + return to +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +func (r *Route) Copy() *Route { + if r == nil { + return nil + } + + return &Route{ + Dst: r.Dst, + GW: r.GW, + } +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go 
b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
new file mode 100644
index 00000000000..b8ec3887459
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
@@ -0,0 +1,84 @@
+// Copyright 2019 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"unicode"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+const (
+	// cniValidNameChars is the regexp used to validate valid characters in
+	// containerID and networkName
+	cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]`
+
+	// maxInterfaceNameLength is the maximum length of a valid interface name
+	maxInterfaceNameLength = 15
+)
+
+var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`)
+
+// ValidateContainerID will validate that the supplied containerID is not empty
+// and does not contain invalid characters
+func ValidateContainerID(containerID string) *types.Error {
+
+	if containerID == "" {
+		return types.NewError(types.ErrUnknownContainer, "missing containerID", "")
+	}
+	if !cniReg.MatchString(containerID) {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID)
+	}
+	return nil
+}
+
+// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters
+func ValidateNetworkName(networkName string) *types.Error {
+
+	if networkName == "" {
+		return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "")
+	}
+	if !cniReg.MatchString(networkName) {
+		return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName)
+	}
+	return nil
+}
+
+// ValidateInterfaceName will validate the interface name based on the four rules below
+// 1. The name must not be empty
+// 2. The name must be less than 16 characters
+// 3. The name must not be "." or ".."
+// 4. The name must not contain / or : or any whitespace characters
+// See https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024
+func ValidateInterfaceName(ifName string) *types.Error {
+	if len(ifName) == 0 {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "")
+	}
+	if len(ifName) > maxInterfaceNameLength {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1))
+	}
+	if ifName == "." || ifName == ".." {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . 
or ..", "") + } + for _, r := range bytes.Runes([]byte(ifName)) { + if r == '/' || r == ':' || unicode.IsSpace(r) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") + } + } + + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 00000000000..808c33b8382 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,26 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "github.com/containernetworking/cni/pkg/types/create" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + return create.DecodeVersion(jsonBytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 00000000000..d4bc9d169cd --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %w", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { + return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git 
a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 00000000000..25c3810b2aa --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 00000000000..1326f8038e5 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,89 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/types/create" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return types100.ImplementedSpecVersion +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. 
+var Legacy = PluginSupports("0.1.0", "0.2.0")
+var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0")
+
+// VersionsStartingFrom returns a list of versions starting from min, inclusive
func VersionsStartingFrom(min string) PluginInfo {
+	out := []string{}
+	// cheat, just assume ordered
+	ok := false
+	for _, v := range All.SupportedVersions() {
+		if !ok && v == min {
+			ok = true
+		}
+		if ok {
+			out = append(out, v)
+		}
+	}
+	return PluginSupports(out...)
+}
+
+// NewResult finds a Result object matching the requested version (if any) and asks
+// that object to parse the plugin result, returning an error if parsing failed.
+func NewResult(version string, resultBytes []byte) (types.Result, error) {
+	return create.Create(version, resultBytes)
+}
+
+// ParsePrevResult parses a prevResult in a NetConf structure and sets
+// the NetConf's PrevResult member to the parsed Result object.
+func ParsePrevResult(conf *types.NetConf) error {
+	if conf.RawPrevResult == nil {
+		return nil
+	}
+
+	// Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the
+	// result version must match the config version, if the Result's version
+	// is empty, inject the config version.
+	if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" {
+		conf.RawPrevResult["CNIVersion"] = conf.CNIVersion
+	}
+
+	resultBytes, err := json.Marshal(conf.RawPrevResult)
+	if err != nil {
+		return fmt.Errorf("could not serialize prevResult: %w", err)
+	}
+
+	conf.RawPrevResult = nil
+	conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes)
+	if err != nil {
+		return fmt.Errorf("could not parse prevResult: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containernetworking/plugins/LICENSE b/vendor/github.com/containernetworking/plugins/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md new file mode 100644 index 00000000000..1e265c7a011 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -0,0 +1,41 @@ +### Namespaces, Threads, and Go +On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code. + +### Namespace Switching +Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads. 
+ +Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in. + +For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly. + +### Do() The Recommended Thing +The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example: + +```go +err = targetNs.Do(func(hostNs ns.NetNS) error { + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "dummy0", + }, + } + return netlink.LinkAdd(dummy) +}) +``` + +Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem. + +When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled. + +In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. + + +### Creating network namespaces +Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. 
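
In that spirit, a minimal sketch of entering an existing namespace by path with `WithNetNSPath` (defined in `ns_linux.go` below); the `/var/run/netns/example` path is hypothetical and must already exist:

```go
package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/ns"
)

func main() {
	// The path is hypothetical; something else must have created the
	// namespace, since this library no longer manages creation.
	err := ns.WithNetNSPath("/var/run/netns/example", func(hostNS ns.NetNS) error {
		// Runs on a locked OS thread with the target namespace current;
		// hostNS refers to the original (host) namespace.
		fmt.Println("entered netns; host netns is", hostNS.Path())
		return nil
	})
	if err != nil {
		fmt.Println("namespace switch failed:", err)
	}
}
```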
+ + +### Further Reading + - https://github.com/golang/go/wiki/LockOSThread + - http://morsmachine.dk/go-scheduler + - https://github.com/containernetworking/cni/issues/262 + - https://golang.org/pkg/runtime/ + - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go new file mode 100644 index 00000000000..3b745d4913f --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -0,0 +1,234 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +// Returns an object representing the current OS thread's network namespace +func GetCurrentNS() (NetNS, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return GetNS(getCurrentThreadNetNSPath()) +} + +func getCurrentThreadNetNSPath() string { + // /proc/self/ns/net returns the namespace of the main thread, not + // of whatever thread this goroutine is running on. Make sure we + // use the thread's net namespace since the thread is switching around + return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) +} + +func (ns *netNS) Close() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := ns.file.Close(); err != nil { + return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) + } + ns.closed = true + + return nil +} + +func (ns *netNS) Set() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) + } + + return nil +} + +type NetNS interface { + // Executes the passed closure in this object's network namespace, + // attempting to restore the original namespace before returning. + // However, since each OS thread can have a different network namespace, + // and Go's thread scheduling is highly variable, callers cannot + // guarantee any specific namespace is set unless operations that + // require that namespace are wrapped with Do(). Also, no code called + // from Do() should call runtime.UnlockOSThread(), or the risk + // of executing code in an incorrect namespace will be greater. See + // https://github.com/golang/go/wiki/LockOSThread for further details. + Do(toRun func(NetNS) error) error + + // Sets the current network namespace to this object's network namespace. 
+ // Note that since Go's thread scheduling is highly variable, callers + // cannot guarantee the requested namespace will be the current namespace + // after this function is called; to ensure this wrap operations that + // require the namespace with Do() instead. + Set() error + + // Returns the filesystem path representing this object's network namespace + Path() string + + // Returns a file descriptor representing this object's network namespace + Fd() uintptr + + // Cleans up this instance of the network namespace; if this instance + // is the last user the namespace will be destroyed + Close() error +} + +type netNS struct { + file *os.File + closed bool +} + +// netNS implements the NetNS interface +var _ NetNS = &netNS{} + +const ( + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h + NSFS_MAGIC = 0x6e736673 + PROCFS_MAGIC = 0x9fa0 +) + +type NSPathNotExistErr struct{ msg string } + +func (e NSPathNotExistErr) Error() string { return e.msg } + +type NSPathNotNSErr struct{ msg string } + +func (e NSPathNotNSErr) Error() string { return e.msg } + +func IsNSorErr(nspath string) error { + stat := syscall.Statfs_t{} + if err := syscall.Statfs(nspath, &stat); err != nil { + if os.IsNotExist(err) { + err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} + } else { + err = fmt.Errorf("failed to Statfs %q: %v", nspath, err) + } + return err + } + + switch stat.Type { + case PROCFS_MAGIC, NSFS_MAGIC: + return nil + default: + return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} + } +} + +// Returns an object representing the namespace referred to by @path +func GetNS(nspath string) (NetNS, error) { + err := IsNSorErr(nspath) + if err != nil { + return nil, err + } + + fd, err := os.Open(nspath) + if err != nil { + return nil, err + } + + return &netNS{file: fd}, nil +} + +func (ns *netNS) Path() string { + return ns.file.Name() +} + +func (ns *netNS) Fd() uintptr { + return ns.file.Fd() +} + +func (ns *netNS) errorIfClosed() error { + if ns.closed { + return fmt.Errorf("%q has already been closed", ns.file.Name()) + } + return nil +} + +func (ns *netNS) Do(toRun func(NetNS) error) error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + containedCall := func(hostNS NetNS) error { + threadNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("failed to open current netns: %v", err) + } + defer threadNS.Close() + + // switch to target namespace + if err = ns.Set(); err != nil { + return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) + } + defer func() { + err := threadNS.Set() // switch back + if err == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. + runtime.UnlockOSThread() + } + }() + + return toRun(hostNS) + } + + // save a handle to current network namespace + hostNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("Failed to open current namespace: %v", err) + } + defer hostNS.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Start the callback in a new green thread so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. 
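+	// (Since Go 1.10, if a goroutine exits while still locked to its OS
+	// thread, the runtime terminates that thread instead of reusing it,
+	// so leaving the thread locked here cannot leak the wrong namespace
+	// into an unrelated goroutine.)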
+ var innerError error + go func() { + defer wg.Done() + runtime.LockOSThread() + innerError = containedCall(hostNS) + }() + wg.Wait() + + return innerError +} + +// WithNetNSPath executes the passed closure under the given network +// namespace, restoring the original namespace afterwards. +func WithNetNSPath(nspath string, toRun func(NetNS) error) error { + ns, err := GetNS(nspath) + if err != nil { + return err + } + defer ns.Close() + return ns.Do(toRun) +} diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml new file mode 100644 index 00000000000..fd3e2731042 --- /dev/null +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -0,0 +1,294 @@ +--- + +# Main collection of env. vars to set for all tasks and scripts. +env: + #### + #### Global variables used for all tasks + #### + # Name of the ultimate destination branch for this CI run, PR or post-merge. + DEST_BRANCH: "release-1.24" + GOPATH: "/var/tmp/go" + GOSRC: "${GOPATH}/src/github.com/containers/buildah" + # Overrides default location (/tmp/cirrus) for repo clone + CIRRUS_WORKING_DIR: "${GOSRC}" + # Shell used to execute all script commands + CIRRUS_SHELL: "/bin/bash" + # Automation script path relative to $CIRRUS_WORKING_DIR) + SCRIPT_BASE: "./contrib/cirrus" + # No need to go crazy, but grab enough to cover most PRs + CIRRUS_CLONE_DEPTH: 50 + # Unless set by in_podman.sh, default to operating outside of a podman container + IN_PODMAN: 'false' + + #### + #### Cache-image names to test with + #### + # GCE project where images live + IMAGE_PROJECT: "libpod-218412" + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" + UBUNTU_NAME: "ubuntu-2110" + + IMAGE_SUFFIX: "c5814666029957120" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" + + IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" + + #### + #### Command variables to help avoid duplication + #### + # Command to prefix every output line with a timestamp + # (can't do inline awk script, Cirrus-CI or YAML mangles quoting) + _TIMESTAMP: 'awk -f ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk' + +gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e38e1c66a31ca4c71ddc7e0139d47d075c36dd6d3fd7] + +# Default timeout for each task +timeout_in: 120m + +# Default VM to use unless set or modified by task +gce_instance: + image_project: "${IMAGE_PROJECT}" + zone: "us-central1-c" # Required by Cirrus for the time being + cpu: 2 + memory: "4Gb" + disk: 200 # Gigabytes, do not set less than 200 per obscure GCE docs re: I/O performance + image_name: "${FEDORA_CACHE_IMAGE_NAME}" + + +# Update metadata on VM images referenced by this repository state +meta_task: + name: "VM img. 
keepalive" + alias: meta + + container: + image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" # see contrib/imgts + cpu: 1 + memory: 1 + + env: + # Space-separated list of images used by this repository state + IMGNAMES: |- + ${FEDORA_CACHE_IMAGE_NAME} + ${PRIOR_FEDORA_CACHE_IMAGE_NAME} + ${UBUNTU_CACHE_IMAGE_NAME} + BUILDID: "${CIRRUS_BUILD_ID}" + REPOREF: "${CIRRUS_CHANGE_IN_REPO}" + GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14] + GCPNAME: ENCRYPTED[8509e6a681b859479ce6aa275bd3c4ac82de5beec6df6057925afc4cd85b7ef2e879066ae8baaa2d453b82958e434578] + GCPPROJECT: ENCRYPTED[cc09b62d0ec6746a3df685e663ad25d9d5af95ef5fd843c96f3d0ec9d7f065dc63216b9c685c9f43a776a1d403991494] + CIRRUS_CLONE_DEPTH: 1 # source not used + + script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}' + + +smoke_task: + alias: 'smoke' + name: "Smoke Test" + + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + + binary_artifacts: + path: "./bin/*" + +# Check that all included go modules from other sources match +# # what is expected in `vendor/modules.txt` vs `go.mod`. +vendor_task: + name: "Test Vendoring" + alias: vendor + + env: + CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah" + GOPATH: "/var/tmp/go" + GOSRC: "/var/tmp/go/src/github.com/containers/buildah" + + # Runs within Cirrus's "community cluster" + container: + image: docker.io/library/golang:1.16 + cpu: 1 + memory: 1 + + timeout_in: 5m + + vendor_script: + - 'make vendor' + - './hack/tree_status.sh' + + +# Confirm cross-compile ALL architectures on a Mac OS-X VM. +cross_build_task: + name: "Cross Compile" + alias: cross_build + only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' + + osx_instance: + image: 'big-sur-base' + + script: + - brew update + - brew install go + - brew install go-md2man + - brew install gpgme + - go version + - make cross CGO_ENABLED=0 + + binary_artifacts: + path: "./bin/*" + + +unit_task: + name: 'Unit tests w/ $STORAGE_DRIVER' + alias: unit + only_if: *not_docs + depends_on: &smoke_vendor_cross + - smoke + - vendor + - cross_build + + timeout_in: 1h + + matrix: + - env: + STORAGE_DRIVER: 'vfs' + - env: + STORAGE_DRIVER: 'overlay' + + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + unit_test_script: '${SCRIPT_BASE}/test.sh unit |& ${_TIMESTAMP}' + + binary_artifacts: + path: "./bin/*" + + +conformance_task: + name: 'Build Conformance w/ $STORAGE_DRIVER' + alias: conformance + only_if: *not_docs + depends_on: *smoke_vendor_cross + + gce_instance: + image_name: "${UBUNTU_CACHE_IMAGE_NAME}" + + timeout_in: 25m + + matrix: + - env: + STORAGE_DRIVER: 'vfs' + - env: + STORAGE_DRIVER: 'overlay' + + setup_script: '${SCRIPT_BASE}/setup.sh conformance |& ${_TIMESTAMP}' + conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}' + + +integration_task: + name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER" + alias: integration + only_if: *not_docs + depends_on: *smoke_vendor_cross + + matrix: + # VFS + - env: + DISTRO_NV: "${FEDORA_NAME}" + IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'vfs' + - env: + DISTRO_NV: "${PRIOR_FEDORA_NAME}" + IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'vfs' + - env: + DISTRO_NV: "${UBUNTU_NAME}" + IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'vfs' + # OVERLAY + - env: + DISTRO_NV: "${FEDORA_NAME}" + IMAGE_NAME: 
"${FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + - env: + DISTRO_NV: "${PRIOR_FEDORA_NAME}" + IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + - env: + DISTRO_NV: "${UBUNTU_NAME}" + IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + + gce_instance: + image_name: "$IMAGE_NAME" + + # Separate scripts for separate outputs, makes debugging easier. + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}' + + binary_artifacts: + path: "./bin/*" + + always: &standardlogs + audit_log_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh audit' + df_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh df' + journal_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh journal' + podman_system_info_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh podman' + buildah_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh buildah_version' + buildah_info_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh buildah_info' + package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages' + golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang' + + +in_podman_task: + name: "Containerized Integration" + alias: in_podman + only_if: *not_docs + depends_on: *smoke_vendor_cross + + env: + # This is key, cause the scripts to re-execute themselves inside a container. + IN_PODMAN: 'true' + BUILDAH_ISOLATION: 'chroot' + STORAGE_DRIVER: 'vfs' + + # Separate scripts for separate outputs, makes debugging easier. + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}' + + binary_artifacts: + path: "./bin/*" + + always: + <<: *standardlogs + + +# Status aggregator for all tests. This task simply ensures a defined +# set of tasks all passed, and allows confirming that based on the status +# of this task. 
+success_task: + name: "Total Success" + alias: success + + depends_on: + - meta + - smoke + - unit + - conformance + - vendor + - cross_build + - integration + - in_podman + + container: + image: "quay.io/libpod/alpine:latest" + cpu: 1 + memory: 1 + + env: + CIRRUS_SHELL: direct # execute command directly + + clone_script: mkdir -p $CIRRUS_WORKING_DIR + script: /bin/true diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore new file mode 100644 index 00000000000..939ce6ef52d --- /dev/null +++ b/vendor/github.com/containers/buildah/.gitignore @@ -0,0 +1,12 @@ +docs/buildah*.1 +docs/*.5 +/bin +/buildah +/imgtype +/build/ +/tests/tools/build +Dockerfile* +!/tests/bud/*/Dockerfile* +!/tests/conformance/**/Dockerfile* +*.swp +/result/ diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml new file mode 100644 index 00000000000..af0b10c76b2 --- /dev/null +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -0,0 +1,13 @@ +--- +run: + build-tags: + - apparmor + - seccomp + - selinux + # Don't exceed number of threads available when running under CI + concurrency: 4 +linters: + enable: + - revive + - unconvert + - unparam diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md new file mode 100644 index 00000000000..190efc5e34d --- /dev/null +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -0,0 +1,2390 @@ +![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png) + +# Changelog + +## v1.24.5 (2022-07-14) + + Bump github.com/containers/storage to v1.38.5 + drop commas from changelog dates because `rpmspec -q` doesn't like them + +## v1.24.4 (2022-05-11) + + Bump golang.org/x/crypto to 7b82a4e (GHSA-8c26-wmh5-6g9v - CVE-2022-27191) + Bump c/image to v5.19.3 and c/ocicrypt to v1.1.4 + +## v1.24.3 (2022-03-01) + + vendor: bump c/common to 0.47.5 + Add a test for CVE-2022-27651 + do not set the inheritable capabilities + Bump github.com/prometheus/client_golang to v1.11.1 + vendor: bump c/image to v5.19.2 + vendor: bump c/storage to v1.38.3 + +## v1.24.2 (2022-02-16) + + Increase subuid/subgid to 65535 + history: only add proxy vars to history if specified + run_linux: use --systemd-cgroup + buildah: new global option --cgroup-manager + Makefile: build with systemd when available + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + Bump c/common to v0.47.4 + Cirrus: Use updated VM images + conformance: add a few "replace-directory-with-symlink" tests + Bump back to v1.25.0-dev + +## v1.24.1 (2022-02-03) + + executor: Add support for inline --platform within Dockerfile + caps: fix buildah run --cap-add=all + Update vendor of openshift/imagebuilder + Bump version of containers/image and containers/common + Update vendor of containers/common + System tests: fix accidental vandalism of source dir + build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + imagebuildah.BuildDockerfiles(): create the jobs semaphore + build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + overlay: always honor mountProgram + overlay: move mount program invocation to separate function + overlay: move mount program lookup to separate function + Bump to v1.25.0-dev [NO TESTS NEEDED] + +## v1.24.0 (2022-01-26) + + Update vendor of containers/common + build(deps): bump github.com/golangci/golangci-lint in 
/tests/tools + Github-workflow: Report both failures and errors. + build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + Update docs/buildah-build.1.md + [CI:DOCS] Fix typos and improve language + buildah bud --network add support for custom networks + Make pull commands be consistent + docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + Vendor in latest containers/image + Run codespell on code + .github/dependabot.yml: add tests/tools go.mod + CI: rm git-validation, add GHA job to validate PRs + tests/tools: bump go-md2man to v2.0.1 + tests/tools/Makefile: simplify + tests/tools: bump onsi/ginkgo to v1.16.5 + vendor: bump c/common and others + mount: add support for custom upper and workdir with overlay mounts + linux: fix lookup for runtime + overlay: add MountWithOptions to API which extends support for advanced overlay + Allow processing of SystemContext from FlagSet + .golangci.yml: enable unparam linter + util/resolveName: rm bool return + tests/tools: bump golangci-lint + .gitignore: fixups + all: fix capabilities.NewPid deprecation warnings + bind/mount.go: fix linter comment + all: fix gosimple warning S1039 + tests/e2e/buildah_suite_test.go: fix gosimple warnings + imagebuildah/executor.go: fix gosimple warning + util.go: fix gosimple warning + build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + Enable git-daemon tests + Allow processing of id options from FlagSet + Cirrus: Re-order tasks for more parallelism + Cirrus: Freshen VM images + Fix platform handling for empty os/arch values + Allow processing of network options from FlagSet + Fix permissions on secrets directory + Update containers/image and containers/common + bud.bats: use a local git daemon for the git protocol test + Allow processing of common options from FlagSet + Cirrus: Run int. tests in parallel with unit + vendor c/common + Fix default CNI paths + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + multi-stage: enable mounting stages across each other with selinux enabled + executor: Share selinux label of first stage with other stages in a build + buildkit: add from field to bind and cache mounts so images can be used as source + Use config.ProxyEnv from containers/common + use libnetwork from c/common for networking + setup the netns in the buildah parent process + build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + build: fix libsubid test + Allow callers to replace the ContainerSuffix + parse: allow parsing anomaly non-human value for memory control group + .cirrus: remove static_build from ci + stage_executor: re-use all possible layers from cache for squashed builds + build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + Allow rootless buildah to set resource limits on cgroup V2 + build(deps): bump github.com/docker/docker + tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + Wire logger through to config + copier.Put: check for is-not-a-directory using lstat, not stat + Turn on rootless cgroupv2 tests + Grab all of the containers.conf settings for namespaces. 
+ image: set MediaType in OCI manifests + copier: RemoveAll possibly-directories + Simple README fix + images: accept multiple filter with logical AND + build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + UPdate vendor of container/storage + build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + Make LocalIP public function so Podman can use it + Fix UnsetEnv for buildah bud + Tests should rely only on static/unchanging images + run: ensure that stdio pipes are labeled correctly + build(deps): bump github.com/docker/docker + Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + chroot: don't use the generate default seccomp filter for unit tests + build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + ssh-agent: Increase timeout before we explicitly close connection + docs/tutorials: update + Clarify that manifest defaults to localhost as the registry name + "config": remove a stray bit of debug output + "commit": fix a flag typo + Fix an error message: unlocking vs locking + Expand the godoc for CommonBuildOptions.Secrets + chroot: accept an "rw" option + Add --unsetenv option to buildah commit and build + define.TempDirForURL(): show CombinedOutput when a command fails + config: support the variant field + rootless: do not bind mount /sys if not needed + Fix tutorial to specify command on buildah run line + build: history should not contain ARG values + docs: Use guaranteed path for go-md2man + run: honor --network=none from builder if nothing specified + networkpolicy: Should be enabled instead of default when explictly set + Add support for env var secret sources + build(deps): bump github.com/docker/docker + fix: another non-portable shebang + Rootless containers users should use additional groups + Support overlayfs path contains colon + Report ignorefile location when no content added + Add support for host.containers.internal in the /etc/hosts + build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + imagebuildah: fix nil deref + buildkit: add support for mount=type=cache + Default secret mode to 400 + [CI:DOCS] Include manifest example usage + docs: update buildah-from, buildah-pull 'platform' option compatibility notes + docs: update buildah-build 'platform' option compatibility notes + De-dockerize the man page as much as possible + [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + docs: Fix and Update Containerfile man page with supported mount types + mount: add tmpcopyup to tmpfs mount option + buildkit: Add support for --mount=type=tmpfs + build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + Fix command doc links in README.md + build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + build: Add support for buildkit like --mount=type=bind + Bump containerd to v1.5.7 + build(deps): bump github.com/docker/docker + tests: stop pulling php, composer + Fix .containerignore link file + Cirrus: Fix defunct package metadata breaking cache + build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + buildah build: add --all-platforms + Add man page for Containerfile and .containerignore + Plumb the remote logger throughut Buildah + Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + Run: Cleanup run directory after every RUN step + build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0 + Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on 
the go implementation + Makefile: check for `-race` using `-mod=vendor` + imagebuildah: fix an attempt to write to a nil map + push: support to specify the compression format + conformance: allow test cases to specify dockerUseBuildKit + build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + unmarshalConvertedConfig(): handle zstd compression + tests/copy/copy: wire up compression options + Update to github.com/vbauerster/mpb v7.1.5 + Add flouthoc to OWNERS + build: Add additional step nodes when labels are modified + Makefile: turn on race detection whenever it's available + conformance: add more tests for exclusion short-circuiting + Update VM Images + Drop prior-ubuntu testing + Bump to v1.24.0-dev + +## v1.23.0 (2021-09-13) + + Vendor in containers/common v0.44.0 + build(deps): bump github.com/containers/storage from 1.35.0 to 1.36.0 + Update 05-openshift-rootless-build.md + build(deps): bump github.com/opencontainers/selinux from 1.8.4 to 1.8.5 + .cirrus.yml: run cross_build_task on Big Sur + Makefile: update cross targets + Add support for rootless overlay mounts + Cirrus: Increase unit-test timeout + Docs: Clarify rmi w/ manifest/index use + build: mirror --authfile to filesystem if pointing to FD instead of file + Fix build with .git url with branch + manifest: rm should remove only manifests not referenced images. + vendor: bump c/common to v0.43.3-0.20210902095222-a7acc160fb25 + Avoid rehashing and noop compression writer + corrected man page section; .conf file to mention its man page + copy: add --max-parallel-downloads to tune that copy option + copier.Get(): try to avoid descending into directories + tag: Support tagging manifest list instead of resolving to images + Install new manpages to correct sections + conformance: tighten up exception specifications + Add support for libsubid + Add epoch time field to buildah images + Fix ownership of /home/build/.local/share/containers + build(deps): bump github.com/containers/image/v5 from 5.15.2 to 5.16.0 + Rename bud to build, while keeping an alias for to bud. + Replace golang.org/x/crypto/ssh/terminal with golang.org/x/term + build(deps): bump github.com/opencontainers/runc from 1.0.1 to 1.0.2 + build(deps): bump github.com/onsi/gomega from 1.15.0 to 1.16.0 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.3 to 1.7.4 + build(deps): bump github.com/containers/common from 0.43.1 to 0.43.2 + Move DiscoverContainerfile to pkg/util directory + build(deps): bump github.com/containers/image/v5 from 5.15.1 to 5.15.2 + Remove some references to Docker + build(deps): bump github.com/containers/image/v5 from 5.15.0 to 5.15.1 + imagebuildah: handle --manifest directly + build(deps): bump github.com/containers/common from 0.42.1 to 0.43.1 + build(deps): bump github.com/opencontainers/selinux from 1.8.3 to 1.8.4 + executor: make sure imageMap is updated with terminatedStage + tests/serve/serve.go: use a kernel-assigned port + Bump go for vendor-in-container from 1.13 to 1.16 + imagebuildah: move multiple-platform building internal + Adds GenerateStructure helper function to support rootfs-overlay. 
+ Run codespell to fix spelling + Implement SSH RUN mount + build(deps): bump github.com/onsi/gomega from 1.14.0 to 1.15.0 + Fix resolv.conf content with run --net=private + run: fix nil deref using the option's logger + build(deps): bump github.com/containerd/containerd from 1.5.1 to 1.5.5 + make vendor-in-container + bud: teach --platform to take a list + set base-image annotations + build(deps): bump github.com/opencontainers/selinux from 1.8.2 to 1.8.3 + [CI:DOCS] Fix CHANGELOG.md + Bump to v1.23.0-dev [NO TESTS NEEDED] + Accept repositories on login/logout + +## v1.22.0 (2021-08-02) + c/image, c/storage, c/common vendor before Podman 3.3 release + WIP: tests: new assert() + Proposed patch for 3399 (shadowutils) + Fix handling of --restore shadow-utils + build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0 + runtime-flag (debug) test: handle old & new runc + build(deps): bump github.com/containers/storage from 1.32.6 to 1.33.0 + Allow dst and destination for target in secret mounts + Multi-arch: Always push updated version-tagged img + Add a few tests on cgroups V2 + imagebuildah.stageExecutor.prepare(): remove pseudonym check + refine dangling filter + Chown with environment variables not set should fail + Just restore protections of shadow-utils + build(deps): bump github.com/opencontainers/runc from 1.0.0 to 1.0.1 + Remove specific kernel version number requirement from install.md + Multi-arch image workflow: Make steps generic + chroot: fix environment value leakage to intermediate processes + Update nix pin with `make nixpkgs` + buildah source - create and manage source images + Update cirrus-cron notification GH workflow + Reuse code from containers/common/pkg/parse + Cirrus: Freshen VM images + build(deps): bump github.com/containers/storage from 1.32.5 to 1.32.6 + Fix excludes exception begining with / or ./ + Fix syntax for --manifest example + build(deps): bump github.com/onsi/gomega from 1.13.0 to 1.14.0 + vendor containers/common@main + Cirrus: Drop dependence on fedora-minimal + Adjust conformance-test error-message regex + Workaround appearance of differing debug messages + Cirrus: Install docker from package cache + build(deps): bump github.com/containers/ocicrypt from 1.1.1 to 1.1.2 + Switch rusagelogfile to use options.Out + build(deps): bump github.com/containers/storage from 1.32.4 to 1.32.5 + Turn stdio back to blocking when command finishes + Add support for default network creation + Cirrus: Updates for master->main rename + Change references from master to main + Add `--env` and `--workingdir` flags to run command + build(deps): bump github.com/opencontainers/runc + [CI:DOCS] buildah bud: spelling --ignore-file requires parameter + [CI:DOCS] push/pull: clarify supported transports + Remove unused function arguments + Create mountOptions for mount command flags + Extract version command implementation to function + Add --json flags to `mount` and `version` commands + build(deps): bump github.com/containers/storage from 1.32.2 to 1.32.3 + build(deps): bump github.com/containers/common from 0.40.0 to 0.40.1 + copier.Put(): set xattrs after ownership + buildah add/copy: spelling + build(deps): bump github.com/containers/common from 0.39.0 to 0.40.0 + buildah copy and buildah add should support .containerignore + Remove unused util.StartsWithValidTransport + Fix documentation of the --format option of buildah push + Don't use alltransports.ParseImageName with known transports + build(deps): bump 
github.com/containers/image/v5 from 5.13.0 to 5.13.1 + man pages: clarify `rmi` removes dangling parents + tests: make it easer to override the location of the copy helper + build(deps): bump github.com/containers/image/v5 from 5.12.0 to 5.13.0 + [CI:DOCS] Fix links to c/image master branch + imagebuildah: use the specified logger for logging preprocessing warnings + Fix copy into workdir for a single file + Fix docs links due to branch rename + Update nix pin with `make nixpkgs` + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.2 to 1.7.3 + build(deps): bump github.com/opencontainers/selinux from 1.8.1 to 1.8.2 + build(deps): bump go.etcd.io/bbolt from 1.3.5 to 1.3.6 + build(deps): bump github.com/containers/storage from 1.32.1 to 1.32.2 + build(deps): bump github.com/mattn/go-shellwords from 1.0.11 to 1.0.12 + build(deps): bump github.com/onsi/ginkgo from 1.16.3 to 1.16.4 + fix(docs): typo + Move to v1.22.0-dev + Fix handling of auth.json file while in a user namespace + Add rusage-logfile flag to optionally send rusage to a file + imagebuildah: redo step logging + build(deps): bump github.com/onsi/ginkgo from 1.16.2 to 1.16.3 + build(deps): bump github.com/containers/storage from 1.32.0 to 1.32.1 + Add volumes to make running buildah within a container easier + build(deps): bump github.com/onsi/gomega from 1.12.0 to 1.13.0 + Add and use a "copy" helper instead of podman load/save + Bump github.com/containers/common from 0.38.4 to 0.39.0 + containerImageRef/containerImageSource: don't buffer uncompressed layers + containerImageRef(): squashed images have no parent images + Sync. workflow across skopeo, buildah, and podman + Bump github.com/containers/storage from 1.31.1 to 1.31.2 + Bump github.com/opencontainers/runc from 1.0.0-rc94 to 1.0.0-rc95 + Bump to v1.21.1-dev [NO TESTS NEEDED] + +## v1.21.0 (2021-05-19) + Don't blow up if cpp detects errors + Vendor in containers/common v0.38.4 + Remove 'buildah run --security-opt' from completion + update c/common + Fix handling of --default-mounts-file + update vendor of containers/storage v1.31.1 + Bump github.com/containers/storage from 1.30.3 to 1.31.0 + Send logrus messages back to caller when building + github: Fix bad repo. 
ref in workflow config + Check earlier for bad image tags name + buildah bud: fix containers/podman/issues/10307 + Bump github.com/containers/storage from 1.30.1 to 1.30.3 + Cirrus: Support [CI:DOCS] test skipping + Notification email for cirrus-cron build failures + Bump github.com/opencontainers/runc from 1.0.0-rc93 to 1.0.0-rc94 + Fix race condition + Fix copy race while walking paths + Preserve ownership of lower directory when doing an overlay mount + Bump github.com/onsi/gomega from 1.11.0 to 1.12.0 + Update nix pin with `make nixpkgs` + codespell cleanup + Multi-arch github-action workflow unification + Bump github.com/containers/image/v5 from 5.11.1 to 5.12.0 + Bump github.com/onsi/ginkgo from 1.16.1 to 1.16.2 + imagebuildah: ignore signatures when tagging images + update to latest libimage + Bump github.com/containers/common from 0.37.0 to 0.37.1 + Bump github.com/containers/storage from 1.30.0 to 1.30.1 + Upgrade to GitHub-native Dependabot + Document location of auth.json file if XDG_RUNTIME_DIR is not set + run.bats: fix flake in run-user test + Cirrus: Update F34beta -> F34 + pr-should-include-tests: try to make work in buildah + runUsingRuntime: when relaying error from the runtime, mention that + Run(): avoid Mkdir() into the rootfs + imagebuildah: replace archive with chrootarchive + imagebuildah.StageExecutor.volumeCacheSaveVFS(): set up bind mounts + conformance: use :Z with transient mounts when SELinux is enabled + bud.bats: fix a bats warning + imagebuildah: create volume directories when using overlays + imagebuildah: drop resolveSymlink() + namespaces test - refactoring and cleanup + Refactor 'idmapping' system test + Cirrus: Update Ubuntu images to 21.04 + Tiny fixes in bud system tests + Add compabitility wrappers for removed packages + Fix expected message at pulling image + Fix system tests of 'bud' subcommand + [CI:DOCS] Update steps for CentOS runc users + Add support for secret mounts + Add buildah manifest rm command + restore push/pull and util API + [CI:DOCS] Remove older distro docs + Rename rhel secrets to subscriptions + vendor in openshift/imagebuilder + Remove buildah bud --loglevel ... 
+ use new containers/common/libimage package + Fix copier when using globs + Test namespace flags of 'bud' subcommand + Add system test of 'bud' subcommand + Output names of multiple tags in buildah bud + push to docker test: don't get fooled by podman + copier: add Remove() + build(deps): bump github.com/containers/image/v5 from 5.10.5 to 5.11.1 + Restore log timestamps + Add system test of 'buildah help' with a tiny fix + tests: copy.bats: fix infinite hang + Do not force hard code to crun in rootless mode + build(deps): bump github.com/openshift/imagebuilder from 1.2.0 to 1.2.1 + build(deps): bump github.com/containers/ocicrypt from 1.1.0 to 1.1.1 + build(deps): bump github.com/containers/common from 0.35.4 to 0.36.0 + Fix arg missing warning in bud + Check without flag in 'from --cgroup-parent' test + Minor fixes to Buildah as a library tutorial documentation + Add system test of 'buildah version' for packaged buildah + Add a few system tests of 'buildah from' + Log the final error with %+v at logging level "trace" + copier: add GetOptions.NoCrossDevice + Update nix pin with `make nixpkgs` + Bump to v1.20.2-dev + +## v1.20.1 (2021-04-13) + Run container with isolation type set at 'from' + bats helpers.bash - minor refactoring + Bump containers/storage vendor to v1.29.0 + build(deps): bump github.com/onsi/ginkgo from 1.16.0 to 1.16.1 + Cirrus: Update VMs w/ F34beta + CLI add/copy: add a --from option + build(deps): bump github.com/onsi/ginkgo from 1.15.2 to 1.16.0 + Add authentication system tests for 'commit' and 'bud' + fix local image lookup for custom platform + Double-check existence of OCI runtimes + Cirrus: Make use of shared get_ci_vm container + Add system tests of "buildah run" + Update nix pin with `make nixpkgs` + Remove some stuttering on returns errors + Setup alias for --tty to --terminal + Add conformance tests for COPY /... 
+ Put a few more minutes on the clock for the CI conformance test + Add a conformance test for COPY --from $symlink + Add conformance tests for COPY "" + Check for symlink in builtin volume + Sort all mounts by destination directory + System-test cleanup + Export parse.Platform string to be used by podman-remote + blobcache: fix sequencing error + build(deps): bump github.com/containers/common from 0.35.3 to 0.35.4 + Fix URL in demos/buildah_multi_stage.sh + Add a few system tests + [NO TESTS NEEDED] Use --recurse-modules when building git context + Bump to v1.20.1-dev + +## v1.20.0 (2021-03-25) + * vendor in containers/storage v1.28.1 + * build(deps): bump github.com/containers/common from 0.35.2 to 0.35.3 + * tests: prefetch: use buildah, not podman, for pulls + * Use faster way to check image tag existence during multi-arch build + * Add information about multi-arch images to the Readme + * COPY --chown: expand the conformance test + * pkg/chrootuser: use a bufio.Scanner + * [CI:DOCS] Fix rootful typo in docs + * build(deps): bump github.com/onsi/ginkgo from 1.15.1 to 1.15.2 + * Add documentation and testing for .containerignore + * build(deps): bump github.com/sirupsen/logrus from 1.8.0 to 1.8.1 + * build(deps): bump github.com/hashicorp/go-multierror from 1.1.0 to 1.1.1 + * Lookup Containerfile if user specifies a directory + * Add Tag format placeholder to docs + * copier: ignore sockets + * image: propagate errors from extractRootfs + * Remove system test of 'buildah containers -a' + * Clarify userns options are usable only as root in man pages + * Fix system test of 'containers -a' + * Remove duplicated code in addcopy + * build(deps): bump github.com/onsi/ginkgo from 1.15.0 to 1.15.1 + * build(deps): bump github.com/onsi/gomega from 1.10.5 to 1.11.0 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.1 to 1.7.2 + * Update multi-arch buildah build setup with new logic + * Update nix pin with `make nixpkgs` + * overlay.bats: fix the "overlay source permissions" test + * imagebuildah: use overlay for volumes when using overlay + * Make PolicyMap and PullPolicy names align + * copier: add GetOptions.IgnoreUnreadable + * Check local image to match system context + * fix: Containerfiles - smaller set of userns u/gids + * Set upperdir permissions based on source + * Shrink the vendoring size of pkc/cli + * Clarify image name match failure message + * ADD/COPY: create the destination directory first, chroot to it + * copier.GetOptions: add NoDerefSymLinks + * copier: add an Eval function + * Update system test for 'from --cap-add/drop' + * copier: fix a renaming bug + * copier: return child process stderr if we can't JSON decode the response + * Add some system tests + * build(deps): bump github.com/containers/storage from 1.26.0 to 1.27.0 + * complement add/copy --chmod documentation + * buildah login and logout, do not need to enter user namespace + * Add multi-arch image build + * chmod/chown added/fixed in bash completions + * OWNERS: add @lsm5 + * buildah add/copy --chmod dockerfile implementation + * bump github.com/openshift/imagebuilder from 1.1.8 to 1.2.0 + * buildah add/copy --chmod cli implementation for files and urls + * Make sure we set the buildah version label + * Isolation strings, should match user input + * [CI:DOCS] buildah-from.md: remove dup arch,os + * build(deps): bump github.com/containers/image/v5 from 5.10.2 to 5.10.3 + * Cirrus: Temp. 
disable prior-fedora (F32) testing + * pr-should-include-tests: recognized "renamed" tests + * build(deps): bump github.com/sirupsen/logrus from 1.7.0 to 1.8.0 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.0 to 1.7.1 + * build(deps): bump github.com/containers/common from 0.34.2 to 0.35.0 + * Fix reaping of stages with no instructions + * add stale bot + * Add base image name to comment + * build(deps): bump github.com/spf13/cobra from 1.1.1 to 1.1.3 + * Don't fail copy to emptydir + * buildah: use volatile containers + * vendor: update containers/storage + * Eliminate the use of containers/building import in pkg subdirs + * Add more support for removing config + * Improve messages about --cache-from not being supported + * Revert patch to allow COPY/ADD of empty dirs. + * Don't fail copy to emptydir + * Fix tutorial for rootless mode + * Fix caching layers with build args + * Vendor in containers/image v5.10.2 + * build(deps): bump github.com/containers/common from 0.34.0 to 0.34.2 + * build(deps): bump github.com/onsi/ginkgo from 1.14.2 to 1.15.0 + * 'make validate': require PRs to include tests + * build(deps): bump github.com/onsi/gomega from 1.10.4 to 1.10.5 + * build(deps): bump github.com/containers/storage from 1.24.5 to 1.25.0 + * Use chown function for U volume flag from containers/common repository + * --iidfile: print hash prefix + * bump containernetworking/cni to v0.8.1 - fix for CVE-2021-20206 + * run: fix check for host pid namespace + * Finish plumbing for buildah bud --manifest + * buildah manifest add localimage should work + * Stop testing directory permissions with latest docker + * Fix build arg check + * build(deps): bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0 + * [ci:docs] Fix man page for buildah push + * Update nix pin with `make nixpkgs` + * Bump to containers/image v5.10.1 + * Rebuild layer if a change in ARG is detected + * Bump golang.org/x/crypto to the latest + * Add Ashley and Urvashi to Approvers + * local image lookup by digest + * Use build-arg ENV val from local environment if set + * Pick default OCI Runtime from containers.conf + * Added required devel packages + * Cirrus: Native OSX Build + * Cirrus: Two minor cleanup items + * Workaround for RHEL gating test failure + * build(deps): bump github.com/stretchr/testify from 1.6.1 to 1.7.0 + * build(deps): bump github.com/mattn/go-shellwords from 1.0.10 to 1.0.11 + * Reset upstream branch to dev version + * If destination does not exists, do not throw error + +## v1.19.0 (2021-01-08) + Update vendor of containers/storage and containers/common + Buildah inspect should be able to inspect manifests + Make buildah push support pushing manifests lists and digests + Fix handling of TMPDIR environment variable + Add support for --manifest flags + Upper directory should match mode of destination directory + Only grab the OS, Arch if the user actually specified them + Use --arch and --os and --variant options to select architecture and os + Cirrus: Track libseccomp and golang version + copier.PutOptions: add an "IgnoreDevices" flag + fix: `rmi --prune` when parent image is in store. + build(deps): bump github.com/containers/storage from 1.24.3 to 1.24.4 + build(deps): bump github.com/containers/common from 0.31.1 to 0.31.2 + Allow users to specify stdin into containers + Drop log message on failure to mount on /sys file systems to info + Spelling + SELinux no longer requires a tag. 
+ build(deps): bump github.com/opencontainers/selinux from 1.6.0 to 1.8.0 + build(deps): bump github.com/containers/common from 0.31.0 to 0.31.1 + Update nix pin with `make nixpkgs` + Switch references of /var/run -> /run + Allow FROM to be overriden with from option + copier: don't assume we can chroot() on Unixy systems + copier: add PutOptions.NoOverwriteDirNonDir, Get/PutOptions.Rename + copier: handle replacing directories with not-directories + copier: Put: skip entries with zero-length names + build(deps): bump github.com/containers/storage from 1.24.2 to 1.24.3 + Add U volume flag to chown source volumes + Turn off PRIOR_UBUNTU Test until vm is updated + pkg, cli: rootless uses correct isolation + build(deps): bump github.com/onsi/gomega from 1.10.3 to 1.10.4 + update installation doc to reflect current status + Move away from using docker.io + enable short-name aliasing + build(deps): bump github.com/containers/storage from 1.24.1 to 1.24.2 + build(deps): bump github.com/containers/common from 0.30.0 to 0.31.0 + Throw errors when using bogus --network flags + pkg/supplemented test: replace our null blobinfocache + build(deps): bump github.com/containers/common from 0.29.0 to 0.30.0 + inserts forgotten quotation mark + Not prefer use local image create/add manifest + Add container information to .containerenv + Add --ignorefile flag to use alternate .dockerignore flags + Add a source debug build + Fix crash on invalid filter commands + build(deps): bump github.com/containers/common from 0.27.0 to 0.29.0 + Switch to using containers/common pkg's + fix: non-portable shebang #2812 + Remove copy/paste errors that leaked `Podman` into man pages. + Add suggests cpp to spec file + Apply suggestions from code review + update docs for debian testing and unstable + imagebuildah: disable pseudo-terminals for RUN + Compute diffID for mapped-layer at creating image source + intermediateImageExists: ignore images whose history we can't read + Bump to v1.19.0-dev + build(deps): bump github.com/containers/common from 0.26.3 to 0.27.0 + +## v1.18.0 (2020-11-16) + Fix testing error caused by simultanious merge + Vendor in containers/storage v1.24.0 + short-names aliasing + Add --policy flag to buildah pull + Stop overwrapping and stuttering + copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs + Run: don't forcibly disable UTS namespaces in rootless mode + test: ensure non-directory in a Dockerfile path is handled correctly + Add a few tests for `pull` command + Fix buildah config --cmd to handle array + build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9 + Fix NPE when Dockerfile path contains non-directory entries + Update buildah bud man page from podman build man page + Move declaration of decryption-keys to common cli + Run: correctly call copier.Mkdir + util: digging UID/GID out of os.FileInfo should work on Unix + imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results + Verify userns-uid-map and userns-gid-map input + Use CPP, CC and flags in dep check scripts + Avoid overriding LDFLAGS in Makefile + ADD: handle --chown on URLs + Update nix pin with `make nixpkgs` + (*Builder).Run: MkdirAll: handle EEXIST error + copier: try to force loading of nsswitch modules before chroot() + fix MkdirAll usage + build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3 + build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8 + Use osusergo build tag for static build + imagebuildah: cache should take image format into 
account + Bump to v1.18.0-dev + +## v1.17.0 (2020-10-29) + Handle cases where other tools mount/unmount containers + overlay.MountReadOnly: support RO overlay mounts + overlay: use fusermount for rootless umounts + overlay: fix umount + Switch default log level of Buildah to Warn. Users need to see these messages + Drop error messages about OCI/Docker format to Warning level + build(deps): bump github.com/containers/common from 0.26.0 to 0.26.2 + tests/testreport: adjust for API break in storage v1.23.6 + build(deps): bump github.com/containers/storage from 1.23.5 to 1.23.7 + build(deps): bump github.com/fsouza/go-dockerclient from 1.6.5 to 1.6.6 + copier: put: ignore Typeflag="g" + Use curl to get repo file (fix #2714) + build(deps): bump github.com/containers/common from 0.25.0 to 0.26.0 + build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1 + Remove docs that refer to bors, since we're not using it + Buildah bud should not use stdin by default + bump containerd, docker, and golang.org/x/sys + Makefile: cross: remove windows.386 target + copier.copierHandlerPut: don't check length when there are errors + Stop excessive wrapping + CI: require that conformance tests pass + bump(github.com/openshift/imagebuilder) to v1.1.8 + Skip tlsVerify insecure BUILD_REGISTRY_SOURCES + Fix build path wrong https://github.com/containers/podman/issues/7993 + refactor pullpolicy to avoid deps + build(deps): bump github.com/containers/common from 0.24.0 to 0.25.0 + CI: run gating tasks with a lot more memory + ADD and COPY: descend into excluded directories, sometimes + copier: add more context to a couple of error messages + copier: check an error earlier + copier: log stderr output as debug on success + Update nix pin with `make nixpkgs` + Set directory ownership when copied with ID mapping + build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0 + build(deps): bump github.com/containers/common from 0.23.0 to 0.24.0 + Cirrus: Remove bors artifacts + Sort build flag definitions alphabetically + ADD: only expand archives at the right time + Remove configuration for bors + Shell Completion for podman build flags + Bump c/common to v0.24.0 + New CI check: xref --help vs man pages + CI: re-enable several linters + Move --userns-uid-map/--userns-gid-map description into buildah man page + add: preserve ownerships and permissions on ADDed archives + Makefile: tweak the cross-compile target + Bump containers/common to v0.23.0 + chroot: create bind mount targets 0755 instead of 0700 + Change call to Split() to safer SplitN() + chroot: fix handling of errno seccomp rules + build(deps): bump github.com/containers/image/v5 from 5.5.2 to 5.6.0 + Add In Progress section to contributing + integration tests: make sure tests run in ${topdir}/tests + Run(): ignore containers.conf's environment configuration + Warn when setting healthcheck in OCI format + Cirrus: Skip git-validate on branches + tools: update git-validation to the latest commit + tools: update golangci-lint to v1.18.0 + Add a few tests of push command + Add(): fix handling of relative paths with no ContextDir + build(deps): bump github.com/containers/common from 0.21.0 to 0.22.0 + Lint: Use same linters as podman + Validate: reference HEAD + Fix buildah mount to display container names not ids + Update nix pin with `make nixpkgs` + Add missing --format option in buildah from man page + Fix up code based on codespell + build(deps): bump github.com/openshift/imagebuilder from 1.1.6 to 1.1.7 + 
build(deps): bump github.com/containers/storage from 1.23.4 to 1.23.5 + Improve buildah completions + Cirrus: Fix validate commit epoch + Fix bash completion of manifest flags + Uniform some man pages + Update Buildah Tutorial to address BZ1867426 + Update bash completion of `manifest add` sub command + copier.Get(): hard link targets shouldn't be relative paths + build(deps): bump github.com/onsi/gomega from 1.10.1 to 1.10.2 + Pass timestamp down to history lines + Timestamp gets updated everytime you inspect an image + bud.bats: use absolute paths in newly-added tests + contrib/cirrus/lib.sh: don't use CN for the hostname + tests: Add some tests + Update `manifest add` man page + Extend flags of `manifest add` + build(deps): bump github.com/containers/storage from 1.23.3 to 1.23.4 + build(deps): bump github.com/onsi/ginkgo from 1.14.0 to 1.14.1 + Bump to v1.17.0-dev + CI: expand cross-compile checks + +## v1.16.0 (2020-09-03) + fix build on 32bit arches + containerImageRef.NewImageSource(): don't always force timestamps + Add fuse module warning to image readme + Heed our retry delay option values when retrying commit/pull/push + Switch to containers/common for seccomp + Use --timestamp rather then --omit-timestamp + docs: remove outdated notice + docs: remove outdated notice + build-using-dockerfile: add a hidden --log-rusage flag + build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2 + Discard ReportWriter if user sets options.Quiet + build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3 + Fix ownership of content copied using COPY --from + newTarDigester: zero out timestamps in tar headers + Update nix pin with `make nixpkgs` + bud.bats: correct .dockerignore integration tests + Use pipes for copying + run: include stdout in error message + run: use the correct error for errors.Wrapf + copier: un-export internal types + copier: add Mkdir() + in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE + docs/buildah-commit.md: tweak some wording, add a --rm example + imagebuildah: don’t blank out destination names when COPYing + Replace retry functions with common/pkg/retry + StageExecutor.historyMatches: compare timestamps using .Equal + Update vendor of containers/common + Fix errors found in coverity scan + Change namespace handling flags to better match podman commands + conformance testing: ignore buildah.BuilderIdentityAnnotation labels + Vendor in containers/storage v1.23.0 + Add buildah.IsContainer interface + Avoid feeding run_buildah to pipe + fix(buildahimage): add xz dependency in buildah image + Bump github.com/containers/common from 0.15.2 to 0.18.0 + Howto for rootless image building from OpenShift + Add --omit-timestamp flag to buildah bud + Update nix pin with `make nixpkgs` + Shutdown storage on failures + Handle COPY --from when an argument is used + Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0 + Cirrus: Use newly built VM images + Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92 + Enhance the .dockerignore man pages + conformance: add a test for COPY from subdirectory + fix bug manifest inspct + Add documentation for .dockerignore + Add BuilderIdentityAnnotation to identify buildah version + DOC: Add quay.io/containers/buildah image to README.md + Update buildahimages readme + fix spelling mistake in "info" command result display + Don't bind /etc/host and /etc/resolv.conf if network is not present + blobcache: avoid an unnecessary NewImage() + Build static binary with 
`buildGoModule` + copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit + tarFilterer: handle multiple archives + Fix a race we hit during conformance tests + Rework conformance testing + Update 02-registries-repositories.md + test-unit: invoke cmd/buildah tests with --flags + parse: fix a type mismatch in a test + Fix compilation of tests/testreport/testreport + build.sh: log the version of Go that we're using + test-unit: increase the test timeout to 40/45 minutes + Add the "copier" package + Fix & add notes regarding problematic language in codebase + Add dependency on github.com/stretchr/testify/require + CompositeDigester: add the ability to filter tar streams + BATS tests: make more robust + vendor golang.org/x/text@v0.3.3 + Switch golang 1.12 to golang 1.13 + imagebuildah: wait for stages that might not have even started yet + chroot, run: not fail on bind mounts from /sys + chroot: do not use setgroups if it is blocked + Set engine env from containers.conf + imagebuildah: return the right stage's image as the "final" image + Fix a help string + Deduplicate environment variables + switch containers/libpod to containers/podman + Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3 + Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0 + Mask out /sys/dev to prevent information leak + linux: skip errors from the runtime kill + Mask over the /sys/fs/selinux in mask branch + Add VFS additional image store to container + tests: add auth tests + Allow "readonly" as alias to "ro" in mount options + Ignore OS X specific consistency mount option + Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0 + Bump github.com/containers/common from 0.14.0 to 0.15.2 + Rootless Buildah should default to IsolationOCIRootless + imagebuildah: fix inheriting multi-stage builds + Make imagebuildah.BuildOptions.Architecture/OS optional + Make imagebuildah.BuildOptions.Jobs optional + Resolve a possible race in imagebuildah.Executor.startStage() + Switch scripts to use containers.conf + Bump openshift/imagebuilder to v1.1.6 + Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5 + buildah, bud: support --jobs=N for parallel execution + executor: refactor build code inside new function + Add bud regression tests + Cirrus: Fix missing htpasswd in registry img + docs: clarify the 'triples' format + CHANGELOG.md: Fix markdown formatting + Add nix derivation for static builds + Bump to v1.16.0-dev + version centos7 for compatible + +## v1.15.0 (2020-06-17) + Bump github.com/containers/common from 0.12.0 to 0.13.1 + Bump github.com/containers/storage from 1.20.1 to 1.20.2 + Bump github.com/seccomp/containers-golang from 0.4.1 to 0.5.0 + Bump github.com/stretchr/testify from 1.6.0 to 1.6.1 + Bump github.com/opencontainers/runc from 1.0.0-rc9 to 1.0.0-rc90 + Add CVE-2020-10696 to CHANGELOG.md and changelog.txt + Bump github.com/stretchr/testify from 1.5.1 to 1.6.0 + Bump github.com/onsi/ginkgo from 1.12.2 to 1.12.3 + Vendor in containers/common v0.12.0 + fix lighttpd example + Vendor in new go.etcd.io/bbolt + Bump github.com/onsi/ginkgo from 1.12.1 to 1.12.2 + Bump imagebuilder for ARG fix + Bump github.com/containers/common from 0.11.2 to 0.11.4 + remove dependency on openshift struct + Warn on unset build arguments + vendor: update seccomp/containers-golang to v0.4.1 + Ammended docs + Updated docs + clean up comments + update exit code for tests + Implement commit for encryption + implementation of encrypt/decrypt push/pull/bud/from + fix resolve docker 
image name as transport + Bump github.com/opencontainers/go-digest from 1.0.0-rc1 to 1.0.0 + Bump github.com/onsi/ginkgo from 1.12.0 to 1.12.1 + Bump github.com/containers/storage from 1.19.1 to 1.19.2 + Bump github.com/containers/image/v5 from 5.4.3 to 5.4.4 + Add preliminary profiling support to the CLI + Bump github.com/containers/common from 0.10.0 to 0.11.2 + Evaluate symlinks in build context directory + fix error info about get signatures for containerImageSource + Add Security Policy + Cirrus: Fixes from review feedback + Bump github.com/containers/storage from 1.19.0 to 1.19.1 + Bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0 + imagebuildah: stages shouldn't count as their base images + Update containers/common v0.10.0 + Bump github.com/fsouza/go-dockerclient from 1.6.4 to 1.6.5 + Add registry to buildahimage Dockerfiles + Cirrus: Use pre-installed VM packages + F32 + Cirrus: Re-enable all distro versions + Cirrus: Update to F31 + Use cache images + golangci-lint: Disable gosimple + Lower number of golangci-lint threads + Fix permissions on containers.conf + Don't force tests to use runc + Bump github.com/containers/common from 0.9.1 to 0.9.5 + Return exit code from failed containers + Bump github.com/containers/storage from 1.18.2 to 1.19.0 + Bump github.com/containers/common from 0.9.0 to 0.9.1 + cgroup_manager should be under [engine] + Use c/common/pkg/auth in login/logout + Cirrus: Temporarily disable Ubuntu 19 testing + Add containers.conf to stablebyhand build + Update gitignore to exclude test Dockerfiles + Bump github.com/fsouza/go-dockerclient from 1.6.3 to 1.6.4 + Bump github.com/containers/common from 0.8.1 to 0.9.0 + Bump back to v1.15.0-dev + Remove warning for systemd inside of container + +## v1.14.8 (2020-04-09) + Run (make vendor) + Run (make -C tests/tools vendor) + Run (go mod tidy) before (go mod vendor) again + Fix (make vendor) + Bump validation + Bump back to v1.15.0-dev + +## v1.14.7 (2020-04-07) + Bump github.com/containers/image/v5 from 5.3.1 to 5.4.3 + make vendor: run `tidy` after `vendor` + Do not skip the directory when the ignore pattern matches + Bump github.com/containers/common from 0.7.0 to 0.8.1 + Downgrade siruspen/logrus from 1.4.2 + Fix errorf conventions + dockerignore tests : remove symlinks, rework + Bump back to v1.15.0-dev + +## v1.14.6 (2020-04-02) + bud.bats - cleanup, refactoring + vendor in latest containers/storage 1.18.0 and containers/common v0.7.0 + Bump github.com/spf13/cobra from 0.0.6 to 0.0.7 + Bump github.com/containers/storage from 1.16.5 to 1.17.0 + Bump github.com/containers/image/v5 from 5.2.1 to 5.3.1 + Fix Amazon install step + Bump back to v1.15.0-dev + Fix bud-build-arg-cache test + Make image history work correctly with new args handling + Don't add args to the RUN environment from the Builder + Update github.com/openshift/imagebuilder to v1.1.4 + Add .swp files to .gitignore + +## v1.14.5 (2020-03-26) + revert #2246 FIPS mode change + Bump back to v1.15.0-dev + image with dup layers: we now have one on quay + digest test : make more robust + +## v1.14.4 (2020-03-25) + Fix fips-mode check for RHEL8 boxes + Fix potential CVE in tarfile w/ symlink (Edit 02-Jun-2020: Addresses CVE-2020-10696) + Fix .dockerignore with globs and ! 
commands + update install steps for Amazon Linux 2 + Bump github.com/openshift/imagebuilder from 1.1.2 to 1.1.3 + Add comment for RUN command in volume ownership test + Run stat command directly for volume ownership test + vendor in containers/common v0.6.1 + Cleanup go.sum + Bump back to v1.15.0-dev + +## v1.14.3 (2020-03-17) + Update containers/storage to v1.16.5 + Bump github.com/containers/storage from 1.16.2 to 1.16.4 + Bump github.com/openshift/imagebuilder from 1.1.1 to 1.1.2 + Update github.com/openshift/imagebuilder vendoring + Update unshare man page to fix script example + Fix compilation errors on non linux platforms + Bump containers/common and opencontainers/selinux versions + Add tests for volume ownership + Preserve volume uid and gid through subsequent commands + Fix FORWARD_NULL errors found by Coverity + Bump github.com/containers/storage from 1.16.1 to 1.16.2 + Fix errors found by codespell + Bump back to v1.15.0-dev + Add Pull Request Template + +## v1.14.2 (2020-03-03) + Add Buildah pull request template + Bump to containers/storage v1.16.1 + run_linux: fix tight loop if file is not pollable + Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3 + Bump github.com/containers/common from 0.4.1 to 0.4.2 + Bump back to v1.15.0-dev + Add Containerfile to build a versioned stable image on quay.io + +## v1.14.1 (2020-02-27) + Search for local runtime per values in containers.conf + Set correct ownership on working directory + BATS : in teardown, umount stale mounts + Bump github.com/spf13/cobra from 0.0.5 to 0.0.6 + Bump github.com/fsouza/go-dockerclient from 1.6.1 to 1.6.3 + Bump github.com/stretchr/testify from 1.4.0 to 1.5.1 + Replace unix with syscall to allow vendoring into libpod + Update to containers/common v0.4.1 + Improve remote manifest retrieval + Fix minor spelling errors in containertools README + Clear the right variable in buildahimage + Correct a couple of incorrect format specifiers + Update to containers/common v0.3.0 + manifest push --format: force an image type, not a list type + run: adjust the order in which elements are added to $PATH + getDateAndDigestAndSize(): handle creation time not being set + Bump github.com/containers/common from 0.2.0 to 0.2.1 + include installation steps for CentOS 8 and Stream + include installation steps for CentOS7 and forks + Adjust Ubuntu install info to also work on Pop!_OS + Make the commit id clear like Docker + Show error on copied file above context directory in build + Bump github.com/containers/image/v5 from 5.2.0 to 5.2.1 + pull/from/commit/push: retry on most failures + Makefile: fix install.cni.sudo + Repair buildah so it can use containers.conf on the server side + Bump github.com/mattn/go-shellwords from 1.0.9 to 1.0.10 + Bump github.com/fsouza/go-dockerclient from 1.6.0 to 1.6.1 + Fixing formatting & build instructions + Add Code of Conduct + Bors: Fix no. req. 
github reviews + Cirrus+Bors: Simplify temp branch skipping + Bors-ng: Add documentation and status-icon + Bump github.com/onsi/ginkgo from 1.11.0 to 1.12.0 + fix XDG_RUNTIME_DIR for authfile + Cirrus: Disable F29 testing + Cirrus: Add jq package + Cirrus: Fix lint + validation using wrong epoch + Stop using fedorproject registry + Bors: Workaround ineffective required statuses + Bors: Enable app + Disable Travis + Cirrus: Add standardized log-collection + Cirrus: Improve automated lint + validation + Allow passing options to golangci-lint + Cirrus: Fixes from review feedback + Cirrus: Temporarily ignore VM testing failures + Cirrus: Migrate off papr + implement VM testing + Cirrus: Update packages + fixes for get_ci_vm.sh + Show validation command-line + Skip overlay test w/ vfs driver + use alpine, not centos, for various tests + Flake handling: cache and prefetch images + Bump to v1.15.0-dev + +## v1.14.0 (2020-02-05) + bump github.com/mtrmac/gpgme + Update containers/common to v0.1.4 + manifest push: add --format option + Bump github.com/onsi/gomega from 1.8.1 to 1.9.0 + vendor github.com/containers/image/v5@v5.2.0 + info test: deal with random key order + Bump back to v1.14.0-dev + +## v1.13.2 (2020-01-29) + sign.bats: set GPG_TTY=/dev/null + Fix parse_unsupported.go + getDateAndDigestAndSize(): use manifest.Digest + Bump github.com/opencontainers/selinux from 1.3.0 to 1.3.1 + Bump github.com/containers/common from 0.1.0 to 0.1.2 + Touch up os/arch doc + chroot: handle slightly broken seccomp defaults + buildahimage: specify fuse-overlayfs mount options + Bump github.com/mattn/go-shellwords from 1.0.7 to 1.0.9 + copy.bats: make sure we detect failures due to missing source + parse: don't complain about not being able to rename something to itself + Makefile: use a $(GO_TEST) macro, fix a typo + manifests: unit test fix + Fix build for 32bit platforms + Allow users to set OS and architecture on bud + Fix COPY in containerfile with envvar + Bump c/storage to v1.15.7 + add --sign-by to bud/commit/push, --remove-signatures for pull/push + Remove cut/paste error in CHANGELOG.md + Update vendor of containers/common to v0.1.0 + update install instructions for Debian, Raspbian and Ubuntu + Add support for containers.conf + Bump back to v1.14.0-dev + +## v1.13.1 (2020-01-14) + Bump github.com/containers/common from 0.0.5 to 0.0.7 + Bump github.com/onsi/ginkgo from 1.10.3 to 1.11.0 + Bump github.com/pkg/errors from 0.8.1 to 0.9.0 + Bump github.com/onsi/gomega from 1.7.1 to 1.8.1 + Add codespell support + copyFileWithTar: close source files at the right time + copy: don't digest files that we ignore + Check for .dockerignore specifically + Travis: rm go 1.12.x + Don't setup excludes, if their is only one pattern to match + set HOME env to /root on chroot-isolation by default + docs: fix references to containers-*.5 + update openshift/api + fix bug Add check .dockerignore COPY file + buildah bud --volume: run from tmpdir, not source dir + Fix imageNamePrefix to give consistent names in buildah-from + cpp: use -traditional and -undef flags + Fix image reference in tutorial 4 + discard outputs coming from onbuild command on buildah-from --quiet + make --format columnizing consistent with buildah images + Bump to v1.14.0-dev + +## v1.13.0 (2019-12-27) + Bump to c/storage v1.15.5 + Update container/storage to v1.15.4 + Fix option handling for volumes in build + Rework overlay pkg for use with libpod + Fix buildahimage builds for buildah + Add support for FIPS-Mode 
backends + Set the TMPDIR for pulling/pushing image to $TMPDIR + WIP: safer test for pull --all-tags + BATS major cleanup: blobcache.bats: refactor + BATS major cleanup: part 4: manual stuff + BATS major cleanup, step 3: yet more run_buildah + BATS major cleanup, part 2: use more run_buildah + BATS major cleanup, part 1: log-level + Bump github.com/containers/image/v5 from 5.0.0 to 5.1.0 + Bump github.com/containers/common from 0.0.3 to 0.0.5 + Bump to v1.13.0-dev + +## v1.12.0 (2019-12-13) + Allow ADD to use http src + Bump to c/storage v.1.15.3 + install.md: update golang dependency + imgtype: reset storage opts if driver overridden + Start using containers/common + overlay.bats typo: fuse-overlays should be fuse-overlayfs + chroot: Unmount with MNT_DETACH instead of UnmountMountpoints() + bind: don't complain about missing mountpoints + imgtype: check earlier for expected manifest type + Vendor containers/storage fix + Vendor containers/storage v1.15.1 + Add history names support + PR takeover of #1966 + Tests: Add inspect test check steps + Tests: Add container name and id check in containers test steps + Test: Get permission in add test + Tests: Add a test for tag by id + Tests: Add test cases for push test + Tests: Add image digest test + Tests: Add some buildah from tests + Tests: Add two commit test + Tests: Add buildah bud with --quiet test + Tests: Add two test for buildah add + Bump back to v1.12.0-dev + +## v1.11.6 (2019-12-03) + Handle missing equal sign in --from and --chown flags for COPY/ADD + bud COPY does not download URL + Bump github.com/onsi/gomega from 1.7.0 to 1.7.1 + Fix .dockerignore exclude regression + Ran buildah through codespell + commit(docker): always set ContainerID and ContainerConfig + Touch up commit man page image parameter + Add builder identity annotations. 
+ info: use util.Runtime() + Bump github.com/onsi/ginkgo from 1.10.2 to 1.10.3 + Bump back to v1.12.0-dev + +## v1.11.5 (2019-11-11) + Enhance error on unsafe symbolic link targets + Add OCIRuntime to info + Check nonexsit authfile + Only output image id if running buildah bud --quiet + Fix --pull=true||false and add --pull-never to bud and from (retry) + cgroups v2: tweak or skip tests + Prepwork: new 'skip' helpers for tests + Handle configuration blobs for manifest lists + unmarshalConvertedConfig: avoid using the updated image's ref + Add completions for Manifest commands + Add disableFips option to secrets pkg + Update bud.bats test archive test + Add test for caching based on content digest + Builder.untarPath(): always evaluate b.ContentDigester.Hash() + Bump github.com/onsi/ginkgo from 1.10.1 to 1.10.2 + Fix another broken test: copy-url-mtime + yet more fixes + Actual bug fix for 'add' test: fix the expected mode + BATS tests - lots of mostly minor cleanup + build: drop support for ostree + Add support for make vendor-in-container + imgtype: exit with error if storage fails + remove XDG_RUNTIME_DIR from default authfile path + fix troubleshooting redirect instructions + Bump back to v1.12.0-dev + +## v1.11.4 (2019-10-28) + buildah: add a "manifest" command + manifests: add the module + pkg/supplemented: add a package for grouping images together + pkg/manifests: add a manifest list build/manipulation API + Update for ErrUnauthorizedForCredentials API change in containers/image + Update for manifest-lists API changes in containers/image + version: also note the version of containers/image + Move to containers/image v5.0.0 + Enable --device directory as src device + Fix git build with branch specified + Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1 + Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0 + Add clarification to the Tutorial for new users + Silence "using cache" to ensure -q is fully quiet + Add OWNERS File to Buildah + Bump github.com/containers/storage from 1.13.4 to 1.13.5 + Move runtime flag to bud from common + Commit: check for storage.ErrImageUnknown using errors.Cause() + Fix crash when invalid COPY --from flag is specified. + Bump back to v1.12.0-dev + +## v1.11.3 (2019-10-04) + Update c/image to v4.0.1 + Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 + Fix --build-args handling + Bump github.com/spf13/cobra from 0.0.3 to 0.0.5 + Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2 + Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1 + Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4 + Add support for retrieving context from stdin "-" + Ensure bud remote context cleans up on error + info: add cgroups2 + Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1 + Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6 + Bump github.com/stretchr/testify from 1.3.0 to 1.4.0 + Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0 + Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3 + Bump github.com/onsi/gomega from 1.5.0 to 1.7.0 + update c/storage to v1.13.4 + Print build 'STEP' line to stdout, not stderr + Fix travis-ci on forks + Vendor c/storage v1.13.3 + Use Containerfile by default + Added tutorial on how to include Buildah as library + util/util: Fix "configuraitno" -> "configuration" log typo + Bump back to v1.12.0-dev + +## v1.11.2 (2019-09-13) + Add some cleanup code + Move devices code to unit specific directory. 
+ Bump back to v1.12.0-dev + +## v1.11.1 (2019-09-11) + Add --devices flag to bud and from + Downgrade .papr to highest atomic verion + Add support for /run/.containerenv + Truncate output of too long image names + Preserve file and directory mount permissions + Bump fedora version from 28 to 30 + makeImageRef: ignore EmptyLayer if Squash is set + Set TMPDIR to /var/tmp by default + replace --debug=false with --log-level=error + Allow mounts.conf entries for equal source and destination paths + fix label and annotation for 1-line Dockerfiles + Enable interfacer linter and fix lints + install.md: mention goproxy + Makefile: use go proxy + Bump to v1.12.0-dev + +## v1.11.0 (2019-08-29) + tests/bud.bats: add --signature-policy to some tests + Vendor github.com/openshift/api + pull/commit/push: pay attention to $BUILD_REGISTRY_SOURCES + Add `--log-level` command line option and deprecate `--debug` + add support for cgroupsV2 + Correctly detect ExitError values from Run() + Disable empty logrus timestamps to reduce logger noise + Remove outdated deps Makefile target + Remove gofmt.sh in favor of golangci-lint + Remove govet.sh in favor of golangci-lint + Allow to override build date with SOURCE_DATE_EPOCH + Update shebangs to take env into consideration + Fix directory pull image names + Add --digestfile and Re-add push statement as debug + README: mention that Podman uses Buildah's API + Use content digests in ADD/COPY history entries + add: add a DryRun flag to AddAndCopyOptions + Fix possible runtime panic on bud + Add security-related volume options to validator + use correct path for ginkgo + Add bud 'without arguments' integration tests + Update documentation about bud + add: handle hard links when copying with .dockerignore + add: teach copyFileWithTar() about symlinks and directories + Allow buildah bud to be called without arguments + imagebuilder: fix detection of referenced stage roots + Touch up go mod instructions in install + run_linux: fix mounting /sys in a userns + Vendor Storage v1.13.2 + Cirrus: Update VM images + Fix handling of /dev/null masked devices + Update `bud`/`from` help to contain indicator for `--dns=none` + Bump back to v1.11.0-dev + +## v1.10.1 (2019-08-08) + Bump containers/image to v3.0.2 to fix keyring issue + Bug fix for volume minus syntax + Bump container/storage v1.13.1 and containers/image v3.0.1 + bump github.com/containernetworking/cni to v0.7.1 + Add overlayfs to fuse-overlayfs tip + Add automatic apparmor tag discovery + Fix bug whereby --get-login has no effect + Bump to v1.11.0-dev + +## v1.10.0 (2019-08-02) + vendor github.com/containers/image@v3.0.0 + Remove GO111MODULE in favor of `-mod=vendor` + Vendor in containers/storage v1.12.16 + Add '-' minus syntax for removal of config values + tests: enable overlay tests for rootless + rootless, overlay: use fuse-overlayfs + vendor github.com/containers/image@v2.0.1 + Added '-' syntax to remove volume config option + delete `successfully pushed` message + Add golint linter and apply fixes + vendor github.com/containers/storage@v1.12.15 + Change wait to sleep in buildahimage readme + Handle ReadOnly images when deleting images + Add support for listing read/only images + +## v1.9.2 (2019-07-19) + from/import: record the base image's digest, if it has one + Fix CNI version retrieval to not require network connection + Add misspell linter and apply fixes + Add goimports linter and apply fixes + Add stylecheck linter and apply fixes + Add unconvert linter and apply fixes + image: make sure 
we don't try to use zstd compression + run.bats: skip the "z" flag when testing --mount + Update to runc v1.0.0-rc8 + Update to match updated runtime-tools API + bump github.com/opencontainers/runtime-tools to v0.9.0 + Build e2e tests using the proper build tags + Add unparam linter and apply fixes + Run: correct a typo in the --cap-add help text + unshare: add a --mount flag + fix push check image name is not empty + Bump to v1.9.2-dev + +## v1.9.1 (2019-07-12) + add: fix slow copy with no excludes + Add errcheck linter and fix missing error check + Improve tests/tools/Makefile parallelism and abstraction + Fix response body not closed resource leak + Switch to golangci-lint + Add gomod instructions and mailing list links + On Masked path, check if /dev/null already mounted before mounting + Update to containers/storage v1.12.13 + Refactor code in package imagebuildah + Add rootless podman with NFS issue in documentation + Add --mount for buildah run + import method ValidateVolumeOpts from libpod + Fix typo + Makefile: set GO111MODULE=off + rootless: add the built-in slirp DNS server + Update docker/libnetwork to get rid of outdated sctp package + Update buildah-login.md + migrate to go modules + install.md: mention go modules + tests/tools: go module for test binaries + fix --volume splits comma delimited option + Add bud test for RUN with a priv'd command + vendor logrus v1.4.2 + pkg/cli: panic when flags can't be hidden + pkg/unshare: check all errors + pull: check error during report write + run_linux.go: ignore unchecked errors + conformance test: catch copy error + chroot/run_test.go: export funcs to actually be executed + tests/imgtype: ignore error when shutting down the store + testreport: check json error + bind/util.go: remove unused func + rm chroot/util.go + imagebuildah: remove unused `dedupeStringSlice` + StageExecutor: EnsureContainerPath: catch error from SecureJoin() + imagebuildah/build.go: return instead of branching + rmi: avoid redundant branching + conformance tests: nilness: allocate map + imagebuildah/build.go: avoid redundant `filepath.Join()` + imagebuildah/build.go: avoid redundant `os.Stat()` + imagebuildah: omit comparison to bool + fix "ineffectual assignment" lint errors + docker: ignore "repeats json tag" lint error + pkg/unshare: use `...` instead of iterating a slice + conformance: bud test: use raw strings for regexes + conformance suite: remove unused func/var + buildah test suite: remove unused vars/funcs + testreport: fix golangci-lint errors + util: remove redundant `return` statement + chroot: only log clean-up errors + images_test: ignore golangci-lint error + blobcache: log error when draining the pipe + imagebuildah: check errors in deferred calls + chroot: fix error handling in deferred funcs + cmd: check all errors + chroot/run_test.go: check errors + chroot/run.go: check errors in deferred calls + imagebuildah.Executor: remove unused onbuild field + docker/types.go: remove unused struct fields + util: use strings.ContainsRune instead of index check + Cirrus: Initial implementation + Bump to v1.9.1-dev + +## v1.9.0 (2019-06-15) + buildah-run: fix-out-of-range panic (2) + Bump back to v1.9.0-dev + + + +## v1.8.4 (2019-06-13) + Update containers/image to v2.0.0 + run: fix hang with run and --isolation=chroot + run: fix hang when using run + chroot: drop unused function call + remove --> before imgageID on build + Always close stdin pipe + Write deny to setgroups when doing single user mapping + Avoid including linux/memfd.h + Add a test for 
the symlink pointing to a directory + Add missing continue + Fix the handling of symlinks to absolute paths + Only set default network sysctls if not rootless + Support --dns=none like podman + fix bug --cpu-shares parsing typo + Fix validate complaint + Update vendor on containers/storage to v1.12.10 + Create directory paths for COPY thereby ensuring correct perms + imagebuildah: use a stable sort for comparing build args + imagebuildah: tighten up cache checking + bud.bats: add a test verying the order of --build-args + add -t to podman run + imagebuildah: simplify screening by top layers + imagebuildah: handle ID mappings for COPY --from + imagebuildah: apply additionalTags ourselves + bud.bats: test additional tags with cached images + bud.bats: add a test for WORKDIR and COPY with absolute destinations + Cleanup Overlay Mounts content + +## v1.8.3 (2019-06-04) + Add support for file secret mounts + Add ability to skip secrets in mounts file + allow 32bit builds + fix tutorial instructions + imagebuilder: pass the right contextDir to Add() + add: use fileutils.PatternMatcher for .dockerignore + bud.bats: add another .dockerignore test + unshare: fallback to single usermapping + addHelperSymlink: clear the destination on os.IsExist errors + bud.bats: test replacing symbolic links + imagebuildah: fix handling of destinations that end with '/' + bud.bats: test COPY with a final "/" in the destination + linux: add check for sysctl before using it + unshare: set _CONTAINERS_ROOTLESS_GID + Rework buildahimamges + build context: support https git repos + Add a test for ENV special chars behaviour + Check in new Dockerfiles + Apply custom SHELL during build time + config: expand variables only at the command line + SetEnv: we only need to expand v once + Add default /root if empty on chroot iso + Add support for Overlay volumes into the container. 
+ Export buildah validate volume functions so it can share code with libpod + Bump baseline test to F30 + Fix rootless handling of /dev/shm size + Avoid fmt.Printf() in the library + imagebuildah: tighten cache checking back up + Handle WORKDIR with dangling target + Default Authfile to proper path + Make buildah run --isolation follow BUILDAH_ISOLATION environment + Vendor in latest containers/storage and containers/image + getParent/getChildren: handle layerless images + imagebuildah: recognize cache images for layerless images + bud.bats: test scratch images with --layers caching + Get CHANGELOG.md updates + Add some symlinks to test our .dockerignore logic + imagebuildah: addHelper: handle symbolic links + commit/push: use an everything-allowed policy + Correct manpage formatting in files section + Remove must be root statement from buildah doc + Change image names to stable, testing and upstream + Bump back to v1.9.0-dev + +## v1.8.2 (2019-05-02) + Vendor Storage 1.12.6 + Create scratch file in TESTDIR + Test bud-copy-dot with --layers picks up changed file + Bump back to 1.9.0-dev + +## v1.8.1 (2019-05-01) + Don't create directory on container + Replace kubernetes/pause in tests with k8s.gcr.io/pause + imagebuildah: don't remove intermediate images if we need them + Rework buildahimagegit to buildahimageupstream + Fix Transient Mounts + Handle WORKDIRs that are symlinks + allow podman to build a client for windows + Touch up 1.9-dev to 1.9.0-dev + Bump to 1.9-dev + +## v1.8.0 (2019-04-26) + Resolve symlink when checking container path + commit: commit on every instruction, but not always with layers + CommitOptions: drop the unused OnBuild field + makeImageRef: pass in the whole CommitOptions structure + cmd: API cleanup: stores before images + run: check if SELinux is enabled + Fix buildahimages Dockerfiles to include support for additionalimages mounted from host. 
+ Detect changes in rootdir + Fix typo in buildah-pull(1) + Vendor in latest containers/storage + Keep track of any build-args used during buildah bud --layers + commit: always set a parent ID + imagebuildah: rework unused-argument detection + fix bug dest path when COPY .dockerignore + Move Host IDMAppings code from util to unshare + Add BUILDAH_ISOLATION rootless back + Travis CI: fail fast, upon error in any step + imagebuildah: only commit images for intermediate stages if we have to + Use errors.Cause() when checking for IsNotExist errors + auto pass http_proxy to container + Bump back to 1.8-dev + +## v1.7.3 (2019-04-16) + imagebuildah: don't leak image structs + Add Dockerfiles for buildahimages + Bump to Replace golang 1.10 with 1.12 + add --dns* flags to buildah bud + Add hack/build_speed.sh test speeds on building container images + Create buildahimage Dockerfile for Quay + rename 'is' to 'expect_output' + squash.bats: test squashing in multi-layered builds + bud.bats: test COPY --from in a Dockerfile while using the cache + commit: make target image names optional + Fix bud-args to allow comma separation + oops, missed some tests in commit.bats + new helper: expect_line_count + New tests for #1467 (string slices in cmdline opts) + Workarounds for dealing with travis; review feedback + BATS tests - extensive but minor cleanup + imagebuildah: defer pulling images for COPY --from + imagebuildah: centralize COMMIT and image ID output + Travis: do not use traviswait + imagebuildah: only initialize imagebuilder configuration once per stage + Make cleaner error on Dockerfile build errors + unshare: move to pkg/ + unshare: move some code from cmd/buildah/unshare + Fix handling of Slices versus Arrays + imagebuildah: reorganize stage and per-stage logic + imagebuildah: add empty layers for instructions + Add missing step in installing into Ubuntu + fix bug in .dockerignore support + imagebuildah: deduplicate prepended "FROM" instructions + Touch up intro + commit: set created-by to the shell if it isn't set + commit: check that we always set a "created-by" + docs/buildah.md: add "containers-" prefixes under "SEE ALSO" + Bump back to 1.8-dev + +## v1.7.2 (2019-03-28) + mount: do not create automatically a namespace + buildah: correctly create the userns if euid!=0 + imagebuildah.Build: consolidate cleanup logic + CommitOptions: drop the redundant Store field + Move pkg/chrootuser from libpod to buildah. + imagebuildah: record image IDs and references more often + vendor imagebuilder v1.1.0 + imagebuildah: fix requiresStart/noRunsRemaining confusion + imagebuildah: check for unused args across stages + bump github.com/containernetworking/cni to v0.7.0-rc2 + imagebuildah: use "useCache" instead of "noCache" + imagebuildah.resolveNameToImageRef(): take name as a parameter + Export fields of the DokcerIgnore struct + imagebuildah: drop the duplicate containerIDs list + rootless: by default use the host network namespace + imagebuildah: split Executor and per-stage execution + imagebuildah: move some fields around + golint: make golint happy + docs: 01-intro.md: add missing . 
in Dockerfile examples + fix bug using .dockerignore + Do not create empty mounts.conf file + images: suppress a spurious blank line with no images + from: distinguish between ADD and COPY + fix bug to not separate each --label value with comma + buildah-bud.md: correct a typo, note a default + Remove mistaken code that got merged in other PR + add sample registries.conf to docs + escape shell variables in README example + slirp4netns: set mtu to 65520 + images: imageReposToMap() already adds : + imagebuildah.ReposToMap: move to cmd + Build: resolve copyFrom references earlier + Allow rootless users to use the cache directory in homedir + bud.bats: use the per-test temp directory + bud.bats: log output before counting length + Simplify checks for leftover args + Print commitID with --layers + fix bug images use the template to print results + rootless: honor --net host + onsi/gomeage add missing files + vendor latest openshift/imagebuilder + Remove noop from squash help + Prepend a comment to files setup in container + imagebuildah resolveSymlink: fix handling of relative links + Errors should be printed to stderr + Add recommends for slirp4netns and fuse-overlay + Update pull and pull-always flags + Hide from users command options that we don't want them to use. + Update secrets fipsmode patch to work on rootless containers + fix unshare option handling and documentation + Vendor in latest containers/storage + Hard-code docker.Transport use in pull --all-tags + Use a types.ImageReference instead of (transport, name) strings in pullImage etc. + Move the computation of srcRef before first pullAndFindImage + Don't throw away user-specified tag for pull --all-tags + CHANGES BEHAVIOR: Remove the string format input to localImageNameForReference + Don't try to parse imageName as transport:image in pullImage + Use reference.WithTag instead of manual string manipulation in Pull + Don't pass image = transport:repo:tag, transport=transport to pullImage + Fix confusing variable naming in Pull + Don't try to parse image name as a transport:image + Fix error reporting when parsing trans+image + Remove 'transport == ""' handling from the pull path + Clean up "pulls" of local image IDs / ID prefixes + Simplify ExpandNames + Document the semantics of transport+name returned by ResolveName + UPdate gitvalidation epoch + Bump back to 1.8-dev + +## v1.7.1 (2019-02-26) + vendor containers/image v1.5 + Move secrets code from libpod into buildah + Update CHANGELOG.md with the past changes + README.md: fix typo + Fix a few issues found by tests/validate/gometalinter.sh + Neutralize buildah/unshare on non-Linux platforms + Explicitly specify a directory to find(1) + README.md: rephrase Buildah description + Stop printing default twice in cli --help + install.md: add section about vendoring + Bump to 1.8-dev + +## v1.7 (2019-02-21) + vendor containers/image v1.4 + Make "images --all" faster + Remove a misleading comment + Remove quiet option from pull options + Make sure buildah pull --all-tags only works with docker transport + Support oci layout format + Fix pulling of images within buildah + Fix tls-verify polarity + Travis: execute make vendor and hack/tree_status.sh + vendor.conf: remove unused dependencies + add missing vendor/github.com/containers/libpod/vendor.conf + vendor.conf: remove github.com/inconshreveable/mousetrap + make vendor: always fetch the latest vndr + add hack/tree_status.sh script + Bump c/Storage to 1.10 + Add --all-tags test to pull + mount: make error clearer + Remove global 
flags from cli help + Set --disable-compression to true as documented + Help document using buildah mount in rootless mode + healthcheck start-period: update documentation + Vendor in latest c/storage and c/image + dumpbolt: handle nested buckets + Fix buildah commit compress by default + Test on xenial, not trusty + unshare: reexec using a memfd copy instead of the binary + Add --target to bud command + Fix example for setting multiple environment variables + main: fix rootless mode + buildah: force umask 022 + pull.bats: specify registry config when using registries + pull.bats: use the temporary directory, not /tmp + unshare: do not set rootless mode if euid=0 + Touch up cli help examples and a few nits + Add an undocumented dumpbolt command + Move tar commands into containers/storage + Fix bud issue with 2 line Dockerfile + Add package install descriptions + Note configuration file requirements + Replace urfave/cli with cobra + cleanup vendor.conf + Vendor in latest containers/storage + Add Quiet to PullOptions and PushOptions + cmd/commit: add flag omit-timestamp to allow for deterministic builds + Add options for empty-layer history entries + Make CLI help descriptions and usage a bit more consistent + vndr opencontainers/selinux + Bump baseline test Fedora to 29 + Bump to v1.7-dev-1 + Bump to v1.6-1 + Add support for ADD --chown + imagebuildah: make EnsureContainerPath() check/create the right one + Bump 1.7-dev + Fix contrib/rpm/bulidah.spec changelog date + +## v1.6-1 (2019-01-18) + Add support for ADD --chown + imagebuildah: make EnsureContainerPath() check/create the right one + Fix contrib/rpm/bulidah.spec changelog date + Vendor in latest containers/storage + Revendor everything + Revendor in latest code by release + unshare: do not set USER=root + run: ignore EIO when flushing at the end, avoid double log + build-using-dockerfile,commit: disable compression by default + Update some comments + Make rootless work under no_pivot_root + Add CreatedAtRaw date field for use with Format + Properly format images JSON output + pull: add all-tags option + Fix support for multiple Short options + pkg/blobcache: add synchronization + Skip empty files in file check of conformance test + Use NoPivot also for RUN, not only for run + Remove no longer used isReferenceInsecure / isRegistryInsecure + Do not set OCIInsecureSkipTLSVerify based on registries.conf + Remove duplicate entries from images JSON output + vendor parallel-copy from containers/image + blobcache.bats: adjust explicit push tests + Handle one line Dockerfile with layers + We should only warn if user actually requests Hostname be set in image + Fix compiler Warning about comparing different size types + imagebuildah: don't walk if rootdir and path are equal + Add aliases for buildah containers, so buildah list, ls and ps work + vendor: use faster version instead compress/gzip + vendor: update libpod + Properly handle Hostname inside of RUN command + docs: mention how to mount in rootless mode + tests: use fully qualified name for centos image + travis.yml: use the fully qualified name for alpine + mount: allow mount only when using vfs + Add some tests for buildah pull + Touch up images -q processing + Refactor: Use library shared idtools.ParseIDMap() instead of bundling it + bump GITVALIDATE_EPOCH + cli.BudFlags: add `--platform` nop + Makefile: allow packagers to more easily add tags + Makefile: soften the requirement on git + tests: add containers json test + Inline blobCache.putBlob into blobCacheDestination.PutBlob + Move 
saveStream and putBlob near blobCacheDestination.PutBlob + Remove BlobCache.PutBlob + Update for API changes + Vendor c/image after merging c/image#536 + Handle 'COPY --from' in Dockerfile + Vendor in latest content from github.com/containers/storage + Clarify docker.io default in push with docker-daemon + Test blob caching + Wire in a hidden --blob-cache option + Use a blob cache when we're asked to use one + Add --disable-compression to 'build-using-dockerfile' + Add a blob cache implementation + vendor: update containers/storage + Update for sysregistriesv2 API changes + Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53 + clean up makefile variables + Fix file permission + Complete the instructions for the command + Show warning when a build arg not used + Assume user 0 group 0, if /etc/passwd file in container. + Add buildah info command + Enable -q when --filter is used for images command + Add v1.5 Release Announcement + Fix dangling filter for images command + Fix completions to print Names as well as IDs + tests: Fix file permissions + Bump 1.6-dev + +## v1.5-1 (2018-11-21) + Bump min go to 1.10 in install.md + vendor: update ostree-go + Update docker build command line in conformance test + Print command in SystemExec as debug information + Add some skip word for inspect check in conformance test + Update regex for multi stage base test + Sort CLI flags + vendor: update containers/storage + Add note to install about non-root on RHEL/CentOS + Update imagebuild depdency to support heading ARGs in Dockerfile + rootless: do not specify --rootless to the OCI runtime + Export resolvesymlink function + Exclude --force-rm from common bud cli flags + run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume + rootless: use slirp4netns to setup the network namespace + Instructions for completing the pull command + Fix travis to not run environment variable patch + rootless: only discard network configuration names + run: only set up /etc/hosts or /etc/resolv.conf with network + common: getFormat: match entire string not only the prefix + vendor: update libpod + Change validation EPOCH + Fixing broken link for container-registries.conf + Restore rootless isolation test for from volume ro test + ostree: fix tag for build constraint + Handle directories better in bud -f + vndr in latest containers/storage + Fix unshare gofmt issue + runSetupBuiltinVolumes(): break up volume setup + common: support a per-user registries conf file + unshare: do not override the configuration + common: honor the rootless configuration file + unshare: create a new mount namespace + unshare: support libpod rootless pkg + Use libpod GetDefaultStorage to report proper storage config + Allow container storage to manage the SELinux labels + Resolve image names with default transport in from command + run: When the value of isolation is set, use the set value instead of the default value. 
+ Vendor in latest containers/storage and opencontainers/selinux + Remove no longer valid todo + Check for empty buildTime in version + Change gofmt so it runs on all but 1.10 + Run gofmt only on Go 1.11 + Walk symlinks when checking cached images for copied/added files + ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder + Set WorkingDir to empty, not / for conformance + Update calls in e2e to addres 1101 + imagebuilder.BuildDockerfiles: return the image ID + Update for changes in the containers/image API + bump(github.com/containers/image) + Allow setting --no-pivot default with an env var + Add man page and bash completion, for --no-pivot + Add the --no-pivot flag to the run command + Improve reporting about individual pull failures + Move the "short name but no search registries" error handling to resolveImage + Return a "search registries were needed but empty" indication in util.ResolveName + Simplify handling of the "tried to pull an image but found nothing" case in newBuilder + Don't even invoke the pull loop if options.FromImage == "" + Eliminate the long-running ref and img variables in resolveImage + In resolveImage, return immediately on success + Fix From As in Dockerfile + Vendor latest containers/image + Vendor in latest libpod + Sort CLI flags of buildah bud + Change from testing with golang 1.9 to 1.11. + unshare: detect when unprivileged userns are disabled + Optimize redundant code + fix missing format param + chroot: fix the args check + imagebuildah: make ResolveSymLink public + Update copy chown test + buildah: use the same logic for XDG_RUNTIME_DIR as podman + V1.4 Release Announcement + Podman --privileged selinux is broken + papr: mount source at gopath + parse: Modify the return value + parse: modify the verification of the isolation value + Make sure we log or return every error + pullImage(): when completing an image name, try docker:// + Fix up Tutorial 3 to account for format + Vendor in latest containers/storage and containers/image + docs/tutorials/01-intro.md: enhanced installation instructions + Enforce "blocked" for registries for the "docker" transport + Correctly set DockerInsecureSkipTLSVerify when pulling images + chroot: set up seccomp and capabilities after supplemental groups + chroot: fix capabilities list setup and application + .papr.yml: log the podman version + namespaces.bats: fix handling of uidmap/gidmap options in pairs + chroot: only create user namespaces when we know we need them + Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS) + bash/buildah: add isolation option to the from command + +## v1.4 (2018-10-02) + from: fix isolation option + Touchup pull manpage + Export buildah ReserveSELinuxLables so podman can use it + Add buildah.io to README.md and doc fixes + Update rmi man for prune changes + Ignore file not found removal error in bud + bump(github.com/containers/{storage,image}) + NewImageSource(): only create one Diff() at a time + Copy ExposedPorts from base image into the config + tests: run conformance test suite in Travis + Change rmi --prune to not accept an imageID + Clear intermediate container IDs after each stage + Request podman version for build issues + unshare: keep the additional groups of the user + Builtin volumes should be owned by the UID/GID of the container + Get rid of dangling whitespace in markdown files + Move buildah from projecatatomic/buildah to containers/buildah + nitpick: parse.validateFlags loop in bud cli + bash: Completion options + Add signature policy to push 
tests + vendor in latest containers/image + Fix grammar in Container Tools Guide + Don't build btrfs if it is not installed + new: Return image-pulling errors from resolveImage + pull: Return image-pulling errors from pullImage + Add more volume mount tests + chroot: create missing parent directories for volume mounts + Push: Allow an empty destination + Add Podman relationship to readme, create container tools guide + Fix arg usage in buildah-tag + Add flags/arguments order verification to other commands + Handle ErrDuplicateName errors from store.CreateContainer() + Evaluate symbolic links on Add/Copy Commands + Vendor in latest containers/image and containers/storage + Retain bounding set when running containers as non root + run container-diff tests in Travis + buildah-images.md: Fix option contents + push: show image digest after push succeed + Vendor in latest containers/storage,image,libpod and runc + Change references to cri-o to point at new repository + Exclude --layers from the common bug cli flags + demos: Increase the executable permissions + run: clear default seccomp filter if not enabled + Bump maximum cyclomatic complexity to 45 + stdin: on HUP, read everything + nitpick: use tabs in tests/helpers.bash + Add flags/arguments order verification to one arg commands + nitpick: decrease cognitive complexity in buildah-bud + rename: Avoid renaming the same name as other containers + chroot isolation: chroot() before setting up seccomp + Small nitpick at the "if" condition in tag.go + cmd/images: Modify json option + cmd/images: Disallow the input of image when using the -a option + Fix examples to include context directory + Update containers/image to fix commit layer issue + cmd/containers: End loop early when using the json option + Make buildah-from error message clear when flags are after arg + Touch up README.md for conformance tests + Update container/storage for lock fix + cmd/rm: restore the correct containerID display + Remove debug lines + Remove docker build image after each test + Add README for conformance test + Update the MakeOptions to accept all command options for buildah + Update regrex to fit the docker output in test "run with JSON" + cmd/buildah: Remove redundant variable declarations + Warn about using Commands in Dockerfile that are not supported by OCI. + Add buildah bud conformance test + Fix rename to also change container name in builder + Makefile: use $(GO) env-var everywhere + Cleanup code to more closely match Docker Build images + Document BUILDAH_* environment variables in buildah bud --help output + Return error immediately if error occurs in Prepare step + Fix --layers ADD from url issue + Add "Sign your PRs" TOC item to contributing.md. 
+ Display the correct ID after deleting image + rmi: Modify the handling of errors + Let util.ResolveName() return parsing errors + Explain Open Container Initiative (OCI) acronym, add link + Update vendor for urfave/cli back to master + Handle COPY --chown in Dockerfile + Switch to Recommends container-selinux + Update vendor for containernetworking, imagebuildah and podman + Document STORAGE_DRIVER and STORAGE_OPTS environment variable + Change references to projectatomic/libpod to containers/libpod + Add container PATH retrieval example + Expand variables names for --env + imagebuildah: provide a way to provide stdin for RUN + Remove an unused srcRef.NewImageSource in pullImage + chroot: correct a comment + chroot: bind mount an empty directory for masking + Don't bother with --no-pivot for rootless isolation + CentOS need EPEL repo + Export a Pull() function + Remove stream options, since docker build does not have it + release v1.3: mention openSUSE + Add Release Announcements directory + Bump to v1.4-dev + +## 1.3 (2018-08-4) + Revert pull error handling from 881 + bud should not search context directory for Dockerfile + Set BUILDAH_ISOLATION=rootless when running unprivileged + .papr.sh: Also test with BUILDAH_ISOLATION=rootless + Skip certain tests when we're using "rootless" isolation + .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot + Add and implement IsolationOCIRootless + Add a value for IsolationOCIRootless + Fix rmi to remove intermediate images associated with an image + Return policy error on pull + Update containers/image to 216acb1bcd2c1abef736ee322e17147ee2b7d76c + Switch to github.com/containers/image/pkg/sysregistriesv2 + unshare: make adjusting the OOM score optional + Add flags validation + chroot: handle raising process limits + chroot: make the resource limits name map module-global + Remove rpm.bats, we need to run this manually + Set the default ulimits to match Docker + buildah: no args is out of bounds + unshare: error message missed the pid + preprocess ".in" suffixed Dockerfiles + Fix the the in buildah-config man page + Only test rpmbuild on latest fedora + Add support for multiple Short options + Update to latest urvave/cli + Add additional SELinux tests + Vendor in latest github.com/containers/{image;storage} + Stop testing with golang 1.8 + Fix volume cache issue with buildah bud --layers + Create buildah pull command + Increase the deadline for gometalinter during 'make validate' + .papr.sh: Also test with BUILDAH_ISOLATION=chroot + .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot + Add a Dockerfile + Set BUILDAH_ISOLATION=chroot when running unprivileged + Add and implement IsolationChroot + Update github.com/opencontainers/runc + maybeReexecUsingUserNamespace: add a default for root + Allow ping command without NET_RAW Capabilities + rmi.storageImageID: fix Wrapf format warning + Allow Dockerfile content to come from stdin + Vendor latest container/storage to fix overlay mountopt + userns: assign additional IDs sequentially + Remove default dev/pts + Add OnBuild test to baseline test + tests/run.bats(volumes): use :z when SELinux is enabled + Avoid a stall in runCollectOutput() + Use manifest from container/image + Vendor in latest containers/image and containers/storage + add rename command + Completion command + Update CHANGELOG.md + Update vendor for runc to fix 32 bit builds + bash completion: remove shebang + Update vendor for runc to fix 32 bit builds + +## 1.2 (2018-07-14) + Vendor in lates 
containers/image + build-using-dockerfile: let -t include transports again + Block use of /proc/acpi and /proc/keys from inside containers + Fix handling of --registries-conf + Fix becoming a maintainer link + add optional CI test fo darwin + Don't pass a nil error to errors.Wrapf() + image filter test: use kubernetes/pause as a "since" + Add --cidfile option to from + vendor: update containers/storage + Contributors need to find the CONTRIBUTOR.md file easier + Add a --loglevel option to build-with-dockerfile + Create Development plan + cmd: Code improvement + allow buildah cross compile for a darwin target + Add unused function param lint check + docs: Follow man-pages(7) suggestions for SYNOPSIS + Start using github.com/seccomp/containers-golang + umount: add all option to umount all mounted containers + runConfigureNetwork(): remove an unused parameter + Update github.com/opencontainers/selinux + Fix buildah bud --layers + Force ownership of /etc/hosts and /etc/resolv.conf to 0:0 + main: if unprivileged, reexec in a user namespace + Vendor in latest imagebuilder + Reduce the complexity of the buildah.Run function + mount: output it before replacing lastError + Vendor in latest selinux-go code + Implement basic recognition of the "--isolation" option + Run(): try to resolve non-absolute paths using $PATH + Run(): don't include any default environment variables + build without seccomp + vendor in latest runtime-tools + bind/mount_unsupported.go: remove import errors + Update github.com/opencontainers/runc + Add Capabilities lists to BuilderInfo + Tweaks for commit tests + commit: recognize committing to second storage locations + Fix ARGS parsing for run commands + Add info on registries.conf to from manpage + Switch from using docker to podman for testing in .papr + buildah: set the HTTP User-Agent + ONBUILD tutorial + Add information about the configuration files to the install docs + Makefile: add uninstall + Add tilde info for push to troubleshooting + mount: support multiple inputs + Use the right formatting when adding entries to /etc/hosts + Vendor in latest go-selinux bindings + Allow --userns-uid-map/--userns-gid-map to be global options + bind: factor out UnmountMountpoints + Run(): simplify runCopyStdio() + Run(): handle POLLNVAL results + Run(): tweak terminal mode handling + Run(): rename 'copyStdio' to 'copyPipes' + Run(): don't set a Pdeathsig for the runtime + Run(): add options for adding and removing capabilities + Run(): don't use a callback when a slice will do + setupSeccomp(): refactor + Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers + Escape use of '_' in .md docs + Break out getProcIDMappings() + Break out SetupIntermediateMountNamespace() + Add Multi From Demo + Use the c/image conversion code instead of converting configs manually + Don't throw away the manifest MIME type and guess again + Consolidate loading manifest and config in initConfig + Pass a types.Image to Builder.initConfig + Require an image ID in importBuilderDataFromImage + Use c/image/manifest.GuessMIMEType instead of a custom heuristic + Do not ignore any parsing errors in initConfig + Explicitly handle "from scratch" images in Builder.initConfig + Fix parsing of OCI images + Simplify dead but dangerous-looking error handling + Don't ignore v2s1 history if docker_version is not set + Add --rm and --force-rm to buildah bud + Add --all,-a flag to buildah images + Separate stdio buffering from writing + Remove tty check from images --format + Add environment variable 
BUILDAH_RUNTIME + Add --layers and --no-cache to buildah bud + Touch up images man + version.md: fix DESCRIPTION + tests: add containers test + tests: add images test + images: fix usage + fix make clean error + Change 'registries' to 'container registries' in man + add commit test + Add(): learn to record hashes of what we add + Minor update to buildah config documentation for entrypoint + Bump to v1.2-dev + Add registries.conf link to a few man pages + +## 1.1 (2018-06-08) + Drop capabilities if running container processes as non root + Print Warning message if cmd will not be used based on entrypoint + Update 01-intro.md + Shouldn't add insecure registries to list of search registries + Report errors on bad transports specification when pushing images + Move parsing code out of common for namespaces and into pkg/parse.go + Add disable-content-trust noop flag to bud + Change freenode chan to buildah + runCopyStdio(): don't close stdin unless we saw POLLHUP + Add registry errors for pull + runCollectOutput(): just read until the pipes are closed on us + Run(): provide redirection for stdio + rmi, rm: add test + add mount test + Add parameter judgment for commands that do not require parameters + Add context dir to bud command in baseline test + run.bats: check that we can run with symlinks in the bundle path + Give better messages to users when image can not be found + use absolute path for bundlePath + Add environment variable to buildah --format + rm: add validation to args and all option + Accept json array input for config entrypoint + Run(): process RunOptions.Mounts, and its flags + Run(): only collect error output from stdio pipes if we created some + Add OnBuild support for Dockerfiles + Quick fix on demo readme + run: fix validate flags + buildah bud should require a context directory or URL + Touchup tutorial for run changes + Validate common bud and from flags + images: Error if the specified imagename does not exist + inspect: Increase err judgments to avoid panic + add test to inspect + buildah bud picks up ENV from base image + Extend the amount of time travis_wait should wait + Add a make target for Installing CNI plugins + Add tests for namespace control flags + copy.bats: check ownerships in the container + Fix SELinux test errors when SELinux is enabled + Add example CNI configurations + Run: set supplemental group IDs + Run: use a temporary mount namespace + Use CNI to configure container networks + add/secrets/commit: Use mappings when setting permissions on added content + Add CLI options for specifying namespace and cgroup setup + Always set mappings when using user namespaces + Run(): break out creation of stdio pipe descriptors + Read UID/GID mapping information from containers and images + Additional bud CI tests + Run integration tests under travis_wait in Travis + build-using-dockerfile: add --annotation + Implement --squash for build-using-dockerfile and commit + Vendor in latest container/storage for devicemapper support + add test to inspect + Vendor github.com/onsi/ginkgo and github.com/onsi/gomega + Test with Go 1.10, too + Add console syntax highlighting to troubleshooting page + bud.bats: print "$output" before checking its contents + Manage "Run" containers more closely + Break Builder.Run()'s "run runc" bits out + util.ResolveName(): handle completion for tagged/digested image names + Handle /etc/hosts and /etc/resolv.conf properly in container + Documentation fixes + Make it easier to parse our temporary directory as an image name + Makefile: 
list new pkg/ subdirectoris as dependencies for buildah + containerImageSource: return more-correct errors + API cleanup: PullPolicy and TerminalPolicy should be types + Make "run --terminal" and "run -t" aliases for "run --tty" + Vendor github.com/containernetworking/cni v0.6.0 + Update github.com/containers/storage + Update github.com/containers/libpod + Add support for buildah bud --label + buildah push/from can push and pull images with no reference + Vendor in latest containers/image + Update gometalinter to fix install.tools error + Update troubleshooting with new run workaround + Added a bud demo and tidied up + Attempt to download file from url, if fails assume Dockerfile + Add buildah bud CI tests for ENV variables + Re-enable rpm .spec version check and new commit test + Update buildah scratch demo to support el7 + Added Docker compatibility demo + Update to F28 and new run format in baseline test + Touchup man page short options across man pages + Added demo dir and a demo. chged distrorlease + builder-inspect: fix format option + Add cpu-shares short flag (-c) and cpu-shares CI tests + Minor fixes to formatting in rpm spec changelog + Fix rpm .spec changelog formatting + CI tests and minor fix for cache related noop flags + buildah-from: add effective value to mount propagation + +## 1.0 (2018-05-06) + Declare Buildah 1.0 + Add cache-from and no-cache noops, and fix doco + Update option and documentation for --force-rm + Adding noop for --force-rm to match --rm + Add buildah bud ENTRYPOINT,CMD,RUN tests + Adding buildah bud RUN test scenarios + Extend tests for empty buildah run command + Fix formatting error in run.go + Update buildah run to make command required + Expanding buildah run cmd/entrypoint tests + Update test cases for buildah run behaviour + Remove buildah run cmd and entrypoint execution + Add Files section with registries.conf to pertinent man pages + tests/config: perfect test + tests/from: add name test + Do not print directly to stdout in Commit() + Touch up auth test commands + Force "localhost" as a default registry + Drop util.GetLocalTime() + Vendor in latest containers/image + Validate host and container paths passed to --volume + test/from: add add-host test + Add --compress, --rm, --squash flags as a noop for bud + Add FIPS mode secret to buildah run and bud + Add config --comment/--domainname/--history-comment/--hostname + 'buildah config': stop replacing Created-By whenever it's not specified + Modify man pages so they compile correctly in mandb + Add description on how to do --isolation to buildah-bud man page + Add support for --iidfile to bud and commit + Refactor buildah bud for vendoring + Fail if date or git not installed + Revert update of entrypoint behaviour to match docker + Vendor in latest imagebuilder code to fix multiple stage builds + Add /bin/sh -c to entrypoint in config + image_test: Improve the test + Fix README example of buildah config + buildah-image: add validation to 'format' + Simple changes to allow buildah to pass make validate + Clarify the use of buildah config options + containers_test: Perfect testing + buildah images and podman images are listing different sizes + buildah-containers: add tests and example to the man page + buildah-containers: add validation to 'format' + Clarify the use of buildah config options + Minor fix for lighttpd example in README + Add tls-verification to troubleshooting + Modify buildah rmi to account for changes in containers/storage + Vendor in latest containers/image and 
containers/storage + addcopy: add src validation + Remove tarball as an option from buildah push --help + Fix secrets patch + Update entrypoint behaviour to match docker + Display imageId after commit + config: add support for StopSignal + Fix docker login issue in travis.yml + Allow referencing stages as index and names + Add multi-stage builds tests + Add multi-stage builds support + Add accessor functions for comment and stop signal + Vendor in latest imagebuilder, to get mixed case AS support + Allow umount to have multi-containers + Update buildah push doc + buildah bud walks symlinks + Imagename is required for commit atm, update manpage + +## 0.16.0 (2018-04-08) + Bump to v0.16.0 + Remove requires for ostree-lib in rpm spec file + Add support for shell + buildah.spec should require ostree-libs + Vendor in latest containers/image + bash: prefer options + Change image time to locale, add troubleshooting.md, add logo to other mds + buildah-run.md: fix error SYNOPSIS + docs: fix error example + Allow --cmd parameter to have commands as values + Touchup README to re-enable logo + Clean up README.md + Make default-mounts-file a hidden option + Document the mounts.conf file + Fix man pages to format correctly + Add various transport support to buildah from + Add unit tests to run.go + If the user overrides the storage driver, the options should be dropped + Show Config/Manifest as JSON string in inspect when format is not set + Switch which for that in README.md + Remove COPR + Fix wrong order of parameters + Vendor in latest containers/image + Remove shallowCopy(), which shouldn't be saving us time any more + shallowCopy: avoid a second read of the container's layer + +## 0.5 - 2017-11-07 + Add secrets patch to buildah + Add proper SELinux labeling to buildah run + Add tls-verify to bud command + Make filtering by date use the image's date + images: don't list unnamed images twice + Fix timeout issue + Add further tty verbiage to buildah run + Make inspect try an image on failure if type not specified + Add support for `buildah run --hostname` + Tons of bug fixes and code cleanup + +## 0.4 - 2017-09-22 +### Added + Update buildah spec file to match new version + Bump to version 0.4 + Add default transport to push if not provided + Add authentication to commit and push + Remove --transport flag + Run: don't complain about missing volume locations + Add credentials to buildah from + Remove export command + Bump containers/storage and containers/image + +## 0.3 - 2017-07-20 +## 0.2 - 2017-07-18 +### Added + Vendor in latest containers/image and containers/storage + Update image-spec and runtime-spec to v1.0.0 + Add support for -- ending options parsing to buildah run + Add/Copy need to support glob syntax + Add flag to remove containers on commit + Add buildah export support + update 'buildah images' and 'buildah rmi' commands + buildah containers/image: Add JSON output option + Add 'buildah version' command + Handle "run" without an explicit command correctly + Ensure volume points get created, and with perms + Add a -a/--all option to "buildah containers" + +## 0.1 - 2017-06-14 +### Added + Vendor in latest container/storage container/image + Add a "push" command + Add an option to specify a Create date for images + Allow building a source image from another image + Improve buildah commit performance + Add a --volume flag to "buildah run" + Fix inspect/tag-by-truncated-image-ID + Include image-spec and runtime-spec versions + buildah mount command should list mounts when no arguments are 
given. + Make the output image format selectable + commit images in multiple formats + Also import configurations from V2S1 images + Add a "tag" command + Add an "inspect" command + Update reference comments for docker types origins + Improve configuration preservation in imagebuildah + Report pull/commit progress by default + Contribute buildah.spec + Remove --mount from buildah-from + Add a build-using-dockerfile command (alias: bud) + Create manpages for the buildah project + Add installation for buildah and bash completions + Rename "list"/"delete" to "containers"/"rm" + Switch `buildah list quiet` option to only list container IDs + buildah delete should be able to delete multiple containers + Correctly set tags on the names of pulled images + Don't mix "config" in with "run" and "commit" + Add a "list" command, for listing active builders + Add "add" and "copy" commands + Add a "run" command, using runc + Massive refactoring + Make a note to distinguish compression of layers + +## 0.0 - 2017-01-26 +### Added + Initial version, needs work diff --git a/vendor/github.com/containers/buildah/CODE-OF-CONDUCT.md b/vendor/github.com/containers/buildah/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..1b9b2de3581 --- /dev/null +++ b/vendor/github.com/containers/buildah/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The Buildah Project Community Code of Conduct + +The Buildah Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/buildah/CONTRIBUTING.md b/vendor/github.com/containers/buildah/CONTRIBUTING.md new file mode 100644 index 00000000000..80b812e35c8 --- /dev/null +++ b/vendor/github.com/containers/buildah/CONTRIBUTING.md @@ -0,0 +1,174 @@ +![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png) + +# Contributing to Buildah + +We'd love to have you join the community! Below summarizes the processes +that we follow. + +## Topics + +* [Reporting Issues](#reporting-issues) +* [Working On Issues](#working-on-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Sign your PRs](#sign-your-prs) +* [Merge bot interaction](#merge-bot-interaction) +* [Communications](#communications) +* [Becoming a Maintainer](#becoming-a-maintainer) + +## Reporting Issues + +Before reporting an issue, check our backlog of +[open issues](https://github.com/containers/buildah/issues) +to see if someone else has already reported it. If so, feel free to add +your scenario, or additional information, to the discussion. Or simply +"subscribe" to it to be notified when it is updated. + +If you find a new issue with the project, we'd love to hear about it! The most +important aspect of a bug report is that it includes enough information for +us to reproduce it. So, please include as much detail as possible and try +to remove the extra stuff that doesn't really relate to the issue itself. +The easier it is for us to reproduce it, the faster it'll be fixed! + +Please don't include any private/sensitive information in your issue! + +## Working On Issues + +Once you have decided to contribute to Buildah by working on an issue, check our +backlog of [open issues](https://github.com/containers/buildah/issues) looking +for any that do not have an "In Progress" label attached to them. Often issues +will be assigned to someone, to be worked on at a later time.
If you have the +time to work on the issue now, add yourself as an assignee, and set the +"In Progress" label if you’re a member of the “Containers” GitHub organization. +If you cannot set the label, just add a quick comment in the issue asking that +the “In Progress” label be set, and a member will do so for you. + +## Submitting Pull Requests + +No Pull Request (PR) is too small! Typos, additional comments in the code, +new testcases, bug fixes, new features, more documentation, ... it's all +welcome! + +While bug fixes can first be identified via an "issue", that is not required. +It's OK to just open up a PR with the fix, but make sure you include the same +information you would have included in an issue - like how to reproduce it. + +PRs for new features should include some background on what use cases the +new code is trying to address. When possible and when it makes sense, try to break up +larger PRs into smaller ones - it's easier to review smaller +code changes - but only do so if those smaller PRs make sense as stand-alone changes. + +Regardless of the type of PR, all PRs should include: +* well-documented code changes +* additional testcases, which should ideally fail without your code change applied +* documentation changes + +Squash your commits into logical pieces of work that might want to be reviewed +separately from the rest of the PR. But squashing down to just one commit is OK +too, since in the end the entire PR will be reviewed anyway. When in doubt, +squash. + +PRs that fix issues should include a reference like `Closes #XXXX` in the +commit message so that GitHub will automatically close the referenced issue +when the PR is merged. + + + +### Sign your PRs + +The sign-off is a line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved.
+``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions). + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +## Merge bot interaction + +Maintainers should never merge anything directly into upstream +branches. Instead, interact with the [openshift-ci-robot](https://github.com/openshift-ci-robot/) +through PR comments as summarized [here](https://prow.ci.openshift.org/command-help?repo=containers%2Fbuildah). +This ensures all upstream +branches contain commits in a predictable order, and that every commit +has passed automated testing at some point in the past. A +[Maintainer portal](https://prow.ci.openshift.org/pr?query=is%3Apr%20state%3Aopen%20repo%3Acontainers%2Fbuildah) +is available, showing all PRs awaiting review and approval. + +## Communications + +For general questions or discussions, please use the +`buildah` IRC group on `irc.freenode.net` +that has been set up. + +### For discussions around issues/bugs and features: + +#### Buildah Mailing List + +You can join the Buildah mailing list by sending an email to `buildah-join@lists.buildah.io` with the word `subscribe` in the subject. You can also go to this [page](https://lists.podman.io/admin/lists/buildah.lists.buildah.io/), then scroll down to the bottom of the page and enter your email and, optionally, your name, then click on the "Subscribe" button. + +#### GitHub +You can also use the GitHub +[issues](https://github.com/containers/buildah/issues) +and +[PRs](https://github.com/containers/buildah/pulls) +tracking system. + +## Becoming a Maintainer + +To become a maintainer, you must first be nominated by an existing maintainer. +If a majority (>50%) of maintainers agree, then the proposal is adopted and +you will be added to the list. + +Removing a maintainer requires at least 75% of the remaining maintainers' +approval, or, if the person requests to be removed, it is automatic. +Normally, a maintainer will only be removed if they are considered to be +inactive for a long period of time or are viewed as disruptive to the community. + +The current list of maintainers can be found in the +[MAINTAINERS](MAINTAINERS) file. diff --git a/vendor/github.com/containers/buildah/LICENSE b/vendor/github.com/containers/buildah/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/containers/buildah/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/buildah/MAINTAINERS b/vendor/github.com/containers/buildah/MAINTAINERS new file mode 100644 index 00000000000..5725ca9815a --- /dev/null +++ b/vendor/github.com/containers/buildah/MAINTAINERS @@ -0,0 +1,4 @@ +Dan Walsh (@rhatdan) +Nalin Dahyabhai (@nalind) +Tom Sweeney (@tomsweeneyredhat) +Urvashi Mohnani (@umohnani8) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile new file mode 100644 index 00000000000..8a10dfa05fd --- /dev/null +++ b/vendor/github.com/containers/buildah/Makefile @@ -0,0 +1,190 @@ +export GOPROXY=https://proxy.golang.org + +APPARMORTAG := $(shell hack/apparmor_tag.sh) +STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +SECURITYTAGS ?= seccomp $(APPARMORTAG) +TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) +BUILDTAGS += $(TAGS) +PREFIX := /usr/local +BINDIR := $(PREFIX)/bin +BASHINSTALLDIR = $(PREFIX)/share/bash-completion/completions +BUILDFLAGS := -tags "$(BUILDTAGS)" +BUILDAH := buildah + +GO := go +GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi) +GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi) +GO110 := 1.10 +GOVERSION := $(findstring $(GO110),$(shell go version)) +# test for go module support +ifeq ($(shell go help mod >/dev/null 2>&1 && echo true), true) +export GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor +export GO_TEST=GO111MODULE=on $(GO) test -mod=vendor +else +export GO_BUILD=$(GO) build +export GO_TEST=$(GO) test +endif +RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race) + +GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) +SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) +STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" + +CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod) +#RUNC_COMMIT := $(shell sed -n 's;\tgithub.com/opencontainers/runc \([^ \n]*\).*$\;\1;p' go.mod) +RUNC_COMMIT := v1.0.0-rc8 +LIBSECCOMP_COMMIT := release-2.3 + +EXTRA_LDFLAGS ?= +BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go manifests/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go + +LINTFLAGS ?= + +ifeq ($(DEBUG), 1) + override GOGCFLAGS += -N -l +endif + +# make all DEBUG=1 +# Note: Uses the -N -l go compiler options to disable compiler optimizations +# and inlining. Using these build options allows you to subsequently +# use source debugging tools like delve. 
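+# +# A minimal sketch of that workflow (an illustration, not part of the upstream docs; it assumes the delve debugger, `dlv`, is installed on the host, and uses delve's standard `exec` subcommand): +# +#   make all DEBUG=1 +#   dlv exec ./bin/buildah -- version +#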
+all: bin/buildah bin/imgtype bin/copy docs + +# Update nix/nixpkgs.json to its latest stable commit +.PHONY: nixpkgs +nixpkgs: + @nix run \ + -f channel:nixos-20.09 nix-prefetch-git \ + -c nix-prefetch-git \ + --no-deepClone \ + https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json + +# Build statically linked binary +.PHONY: static +static: + @nix build -f nix/ + mkdir -p ./bin + cp -rfp ./result/bin/* ./bin/ + +bin/buildah: $(SOURCES) cmd/buildah/*.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah + +.PHONY: buildah +buildah: bin/buildah + +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) +LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) +DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) +WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) +.PHONY: cross +cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) + +bin/buildah.%: + mkdir -p ./bin + GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah + +bin/imgtype: $(SOURCES) tests/imgtype/imgtype.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go + +bin/copy: $(SOURCES) tests/copy/copy.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/copy/copy.go + +.PHONY: clean +clean: + $(RM) -r bin tests/testreport/testreport + $(MAKE) -C docs clean + +.PHONY: docs +docs: install.tools ## build the docs on the host + $(MAKE) -C docs + +# For vendoring to work right, the checkout directory must be such that our top +# level is at $GOPATH/src/github.com/containers/buildah.
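+# For example (an illustrative clone path, assuming a default $GOPATH): +# +#   git clone https://github.com/containers/buildah $GOPATH/src/github.com/containers/buildah +#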
+.PHONY: gopath +gopath: + test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) + +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,ERRO -w + +.PHONY: validate +validate: install.tools + ./tests/validate/whitespace.sh + ./hack/xref-helpmsgs-manpages + ./tests/validate/pr-should-include-tests + ./tests/validate/buildahimages-are-sane + +.PHONY: install.tools +install.tools: + make -C tests/tools + +.PHONY: runc +runc: gopath + rm -rf ../../opencontainers/runc + git clone https://github.com/opencontainers/runc ../../opencontainers/runc + cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && $(GO) build -tags "$(STORAGETAGS) $(SECURITYTAGS)" + ln -sf ../../opencontainers/runc/runc + +.PHONY: install.libseccomp.sudo +install.libseccomp.sudo: gopath + rm -rf ../../seccomp/libseccomp + git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp + cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && sudo make install + +.PHONY: install.cni.sudo +install.cni.sudo: gopath + rm -rf ../../containernetworking/plugins + git clone https://github.com/containernetworking/plugins ../../containernetworking/plugins + cd ../../containernetworking/plugins && ./build_linux.sh && sudo install -D -v -m755 -t /opt/cni/bin/ bin/* + +.PHONY: install +install: + install -D -m0755 bin/buildah $(DESTDIR)/$(BINDIR)/buildah + $(MAKE) -C docs install + +.PHONY: uninstall +uninstall: + rm -f $(DESTDIR)/$(BINDIR)/buildah + rm -f $(PREFIX)/share/man/man1/buildah*.1 + rm -f $(DESTDIR)/$(BASHINSTALLDIR)/buildah + +.PHONY: install.completions +install.completions: + install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah + +.PHONY: install.runc +install.runc: + install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/ + +.PHONY: test-conformance +test-conformance: + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 20m ./tests/conformance + +.PHONY: test-integration +test-integration: install.tools + ./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/. + cd tests; ./test_runner.sh + +tests/testreport/testreport: tests/testreport/testreport.go + $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go + +.PHONY: test-unit +test-unit: tests/testreport/testreport + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... 
| grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m + tmp=$(shell mktemp -d) ; \ + mkdir -p $$tmp/root $$tmp/runroot; \ + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf + +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor + +.PHONY: vendor +vendor: + GO111MODULE=on $(GO) mod tidy + GO111MODULE=on $(GO) mod vendor + GO111MODULE=on $(GO) mod verify + +.PHONY: lint +lint: install.tools + ./tests/tools/build/golangci-lint run $(LINTFLAGS) diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS new file mode 100644 index 00000000000..2eb9ea84c3e --- /dev/null +++ b/vendor/github.com/containers/buildah/OWNERS @@ -0,0 +1,28 @@ +approvers: + - TomSweeneyRedHat + - ashley-cui + - cevich + - flouthoc + - giuseppe + - lsm5 + - nalind + - rhatdan + - umohnani8 + - vrothberg +reviewers: + - QiWang19 + - TomSweeneyRedHat + - ashley-cui + - baude + - cevich + - edsantiago + - giuseppe + - haircommander + - jwhonce + - lsm5 + - mheon + - mrunalp + - nalind + - rhatdan + - umohnani8 + - vrothberg diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md new file mode 100644 index 00000000000..512d3f87368 --- /dev/null +++ b/vendor/github.com/containers/buildah/README.md @@ -0,0 +1,132 @@ +![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png) + +# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images + +[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah) + + +The Buildah package provides a command line tool that can be used to +* create a working container, either from scratch or using an image as a starting point +* create an image, either from a working container or via the instructions in a Dockerfile +* build images in either the OCI image format or the traditional upstream docker image format +* mount a working container's root filesystem for manipulation +* unmount a working container's root filesystem +* use the updated contents of a container's root filesystem as a filesystem layer to create a new image +* delete a working container or an image +* rename a local container + +## Buildah Information for Developers + +For blogs, release announcements and more, please check out the [buildah.io](https://buildah.io) website! + +**[Buildah Demos](demos)** + +**[Changelog](CHANGELOG.md)** + +**[Contributing](CONTRIBUTING.md)** + +**[Development Plan](developmentplan.md)** + +**[Installation notes](install.md)** + +**[Troubleshooting Guide](troubleshooting.md)** + +**[Tutorials](docs/tutorials)** + +## Buildah and Podman relationship + +Buildah and Podman are two complementary open-source projects that are +available on most Linux platforms and both projects reside at +[GitHub.com](https://github.com) with Buildah +[here](https://github.com/containers/buildah) and Podman +[here](https://github.com/containers/podman).
Both Buildah and Podman are +command-line tools that work on Open Container Initiative (OCI) images and +containers. The two projects differ in their specialization. + +Buildah specializes in building OCI images. Buildah's commands replicate all +of the commands that are found in a Dockerfile. This allows building images +with and without Dockerfiles while not requiring any root privileges. +Buildah’s ultimate goal is to provide a lower-level coreutils interface to +build images. The flexibility of building images without Dockerfiles allows +for the integration of other scripting languages into the build process. +Buildah follows a simple fork-exec model and does not run as a daemon, +but it is based on a comprehensive API in golang, which can be vendored +into other tools. + +Podman specializes in all of the commands and functions that help you maintain and modify +OCI images, such as pulling and tagging. It also allows you to create, run, and maintain containers +created from those images. For building container images via Dockerfiles, Podman uses Buildah's +golang API and can be installed independently from Buildah. + +A major difference between Podman and Buildah is their concept of a container. Podman +allows users to create "traditional containers" that are intended +to be long-lived, while Buildah containers are really just created to allow content +to be added back to the container image. An easy way to think of it is that the +`buildah run` command emulates the RUN command in a Dockerfile, while the `podman run` +command emulates the `docker run` command in functionality. Because of this and their underlying +storage differences, you cannot see Podman containers from within Buildah or vice versa. + +In short, Buildah is an efficient way to create OCI images, while Podman allows +you to manage and maintain those images and containers in a production environment using +familiar container CLI commands. For more details, see the +[Container Tools Guide](https://github.com/containers/buildah/tree/main/docs/containertools). + +## Example + +From [`./examples/lighttpd.sh`](examples/lighttpd.sh): + +```bash +$ cat > lighttpd.sh <<"EOF" +#!/usr/bin/env bash + +set -x + +ctr1=$(buildah from "${1:-fedora}") + +## Get all updates and install our minimal httpd server +buildah run "$ctr1" -- dnf update -y +buildah run "$ctr1" -- dnf install -y lighttpd + +## Include some buildtime annotations +buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1" + +## Run our server and expose the port +buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1" +buildah config --port 80 "$ctr1" + +## Commit this container to an image name +buildah commit "$ctr1" "${2:-$USER/lighttpd}" +EOF + +$ chmod +x lighttpd.sh +$ sudo ./lighttpd.sh +``` + +## Commands +| Command | Description | +| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +| [buildah-add(1)](/docs/buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. | +| [buildah-build(1)](/docs/buildah-build.1.md) | Build an image using instructions from Containerfiles or Dockerfiles. | +| [buildah-commit(1)](/docs/buildah-commit.1.md) | Create an image from a working container. | +| [buildah-config(1)](/docs/buildah-config.1.md) | Update image configuration settings.
| +| [buildah-containers(1)](/docs/buildah-containers.1.md) | List the working containers and their base images. | +| [buildah-copy(1)](/docs/buildah-copy.1.md) | Copy the contents of a file, URL, or directory into a container's working directory. | +| [buildah-from(1)](/docs/buildah-from.1.md) | Create a new working container, either from scratch or using a specified image as a starting point. | +| [buildah-images(1)](/docs/buildah-images.1.md) | List images in local storage. | +| [buildah-info(1)](/docs/buildah-info.1.md) | Display Buildah system information. | +| [buildah-inspect(1)](/docs/buildah-inspect.1.md) | Inspect the configuration of a container or image. | +| [buildah-mount(1)](/docs/buildah-mount.1.md) | Mount the working container's root filesystem. | +| [buildah-pull(1)](/docs/buildah-pull.1.md) | Pull an image from the specified location. | +| [buildah-push(1)](/docs/buildah-push.1.md) | Push an image from local storage to elsewhere. | +| [buildah-rename(1)](/docs/buildah-rename.1.md) | Rename a local container. | +| [buildah-rm(1)](/docs/buildah-rm.1.md) | Remove one or more working containers. | +| [buildah-rmi(1)](/docs/buildah-rmi.1.md) | Remove one or more images. | +| [buildah-run(1)](/docs/buildah-run.1.md) | Run a command inside of the container. | +| [buildah-tag(1)](/docs/buildah-tag.1.md) | Add an additional name to a local image. | +| [buildah-umount(1)](/docs/buildah-umount.1.md) | Unmount a working container's root file system. | +| [buildah-unshare(1)](/docs/buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. | +| [buildah-version(1)](/docs/buildah-version.1.md) | Display the Buildah version information. | + +**Future goals include:** +* more CI tests +* additional CLI commands (?) diff --git a/vendor/github.com/containers/buildah/SECURITY.md b/vendor/github.com/containers/buildah/SECURITY.md new file mode 100644 index 00000000000..dfc531a5c1a --- /dev/null +++ b/vendor/github.com/containers/buildah/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the Buildah Project + +The Buildah Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go new file mode 100644 index 00000000000..f4e2943f057 --- /dev/null +++ b/vendor/github.com/containers/buildah/add.go @@ -0,0 +1,650 @@ +package buildah + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/chrootuser" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/hashicorp/go-multierror" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// AddAndCopyOptions holds options for add and copy commands. +type AddAndCopyOptions struct { + // Chmod sets the access permissions of the destination content. + Chmod string + // Chown is a spec for the user who should be given ownership over the + // newly-added content, potentially overriding permissions which would + // otherwise be set to 0:0.
+ Chown string + // PreserveOwnership, if Chown is not set, tells us to avoid setting + // ownership of copied items to 0:0, instead using whatever ownership + // information is already set. Not meaningful for remote sources or + // local archives that we extract. + PreserveOwnership bool + // All of the data being copied will pass through Hasher, if set. + // If the sources are URLs or files, their contents will be passed to + // Hasher. + // If the sources include directory trees, Hasher will be passed + // tar-format archives of the directory trees. + Hasher io.Writer + // Excludes is the contents of the .containerignore file. + Excludes []string + // IgnoreFile is the path to the .containerignore file. + IgnoreFile string + // ContextDir is the base directory for content being copied and + // Excludes patterns. + ContextDir string + // ID mapping options to use when contents to be copied are part of + // another container, and need ownerships to be mapped from the host to + // that container's values before copying them into the container. + IDMappingOptions *define.IDMappingOptions + // DryRun indicates that the content should be digested, but not actually + // copied into the container. + DryRun bool + // Clear the setuid bit on items being copied. Has no effect on + // archives being extracted, where the bit is always preserved. + StripSetuidBit bool + // Clear the setgid bit on items being copied. Has no effect on + // archives being extracted, where the bit is always preserved. + StripSetgidBit bool + // Clear the sticky bit on items being copied. Has no effect on + // archives being extracted, where the bit is always preserved. + StripStickyBit bool +} + +// sourceIsRemote returns true if "source" is a remote location. +func sourceIsRemote(source string) bool { + return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") +} + +// getURL writes a tar archive containing the named content +func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode) error { + url, err := url.Parse(src) + if err != nil { + return err + } + response, err := http.Get(src) + if err != nil { + return err + } + defer response.Body.Close() + // Figure out what to name the new content. + name := renameTarget + if name == "" { + name = path.Base(url.Path) + } + // If there's a date on the content, use it. If not, use the Unix epoch + // for compatibility. + date := time.Unix(0, 0).UTC() + lastModified := response.Header.Get("Last-Modified") + if lastModified != "" { + d, err := time.Parse(time.RFC1123, lastModified) + if err != nil { + return errors.Wrapf(err, "error parsing last-modified time") + } + date = d + } + // Figure out the size of the content. + size := response.ContentLength + responseBody := response.Body + if size < 0 { + // Create a temporary file and copy the content to it, so that + // we can figure out how much content there is. + f, err := ioutil.TempFile(mountpoint, "download") + if err != nil { + return errors.Wrapf(err, "error creating temporary file to hold %q", src) + } + defer os.Remove(f.Name()) + defer f.Close() + size, err = io.Copy(f, response.Body) + if err != nil { + return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name()) + } + _, err = f.Seek(0, io.SeekStart) + if err != nil { + return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name()) + } + responseBody = f + } + // Write the output archive. Set permissions for compatibility. 
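+ // Note: the downloaded content is wrapped in a single-entry tar stream so that remote sources can flow through the same tar-consuming path (copier.Put, used by Add below) that local sources use.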
+ tw := tar.NewWriter(writer) + defer tw.Close() + uid := 0 + gid := 0 + if chown != nil { + uid = chown.UID + gid = chown.GID + } + var mode int64 = 0600 + if chmod != nil { + mode = int64(*chmod) + } + hdr := tar.Header{ + Typeflag: tar.TypeReg, + Name: name, + Size: size, + Uid: uid, + Gid: gid, + Mode: mode, + ModTime: date, + } + err = tw.WriteHeader(&hdr) + if err != nil { + return errors.Wrapf(err, "error writing header") + } + _, err = io.Copy(tw, responseBody) + return errors.Wrapf(err, "error writing content from %q to tar stream", src) +} + +// includeDirectoryAnyway returns true if "path" is a prefix for an exception +// known to "pm". If "path" is a directory that "pm" claims matches its list +// of patterns, but "pm"'s list of exclusions contains a pattern for which +// "path" is a prefix, then IncludeDirectoryAnyway() will return true. +// This is not always correct, because it relies on the directory part of any +// exception paths to be specified without wildcards. +func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool { + if !pm.Exclusions() { + return false + } + prefix := strings.TrimPrefix(path, string(os.PathSeparator)) + string(os.PathSeparator) + for _, pattern := range pm.Patterns() { + if !pattern.Exclusion() { + continue + } + spec := strings.TrimPrefix(pattern.String(), string(os.PathSeparator)) + if strings.HasPrefix(spec, prefix) { + return true + } + } + return false +} + +// Add copies the contents of the specified sources into the container's root +// filesystem, optionally extracting contents of local files that look like +// non-empty archives. +func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error { + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return err + } + defer func() { + if err2 := b.Unmount(); err2 != nil { + logrus.Errorf("error unmounting container: %v", err2) + } + }() + + contextDir := options.ContextDir + currentDir := options.ContextDir + if options.ContextDir == "" { + contextDir = string(os.PathSeparator) + currentDir, err = os.Getwd() + if err != nil { + return errors.Wrapf(err, "error determining current working directory") + } + } + + // Figure out what sorts of sources we have. + var localSources, remoteSources []string + for i, src := range sources { + if sourceIsRemote(src) { + remoteSources = append(remoteSources, src) + continue + } + if !filepath.IsAbs(src) && options.ContextDir == "" { + sources[i] = filepath.Join(currentDir, src) + } + localSources = append(localSources, sources[i]) + } + + // Check how many items our local source specs matched. Each spec + // should have matched at least one item, otherwise we consider it an + // error. 
+ var localSourceStats []*copier.StatsForGlob + if len(localSources) > 0 { + statOptions := copier.StatOptions{ + CheckForArchives: extract, + } + localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources) + if err != nil { + return errors.Wrapf(err, "checking on sources under %q", contextDir) + } + } + numLocalSourceItems := 0 + for _, localSourceStat := range localSourceStats { + if localSourceStat.Error != "" { + errorText := localSourceStat.Error + rel, err := filepath.Rel(contextDir, localSourceStat.Glob) + if err != nil { + errorText = fmt.Sprintf("%v; %s", err, errorText) + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText) + } + return errors.Errorf("checking on sources under %q: %v", contextDir, errorText) + } + if len(localSourceStat.Globbed) == 0 { + return errors.Wrapf(syscall.ENOENT, "checking source under %q: no glob matches", contextDir) + } + numLocalSourceItems += len(localSourceStat.Globbed) + } + if numLocalSourceItems+len(remoteSources) == 0 { + return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources) + } + + // Find out which user (and group) the destination should belong to. + var chownDirs, chownFiles *idtools.IDPair + var userUID, userGID uint32 + if options.Chown != "" { + userUID, userGID, err = b.userForCopy(mountPoint, options.Chown) + if err != nil { + return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown) + } + } + var chmodDirsFiles *os.FileMode + if options.Chmod != "" { + p, err := strconv.ParseUint(options.Chmod, 8, 32) + if err != nil { + return errors.Wrapf(err, "error parsing chmod %q", options.Chmod) + } + perm := os.FileMode(p) + chmodDirsFiles = &perm + } + + chownDirs = &idtools.IDPair{UID: int(userUID), GID: int(userGID)} + chownFiles = &idtools.IDPair{UID: int(userUID), GID: int(userGID)} + if options.Chown == "" && options.PreserveOwnership { + chownDirs = nil + chownFiles = nil + } + + // If we have a single source archive to extract, or more than one + // source item, or the destination has a path separator at the end of + // it, and it's not a remote URL, the destination needs to be a + // directory. + if destination == "" || !filepath.IsAbs(destination) { + tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination) + if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) { + destination = tmpDestination + string(os.PathSeparator) + } else { + destination = tmpDestination + } + } + destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator)) || destination == b.WorkDir() + destCanBeFile := false + if len(sources) == 1 { + if len(remoteSources) == 1 { + destCanBeFile = sourceIsRemote(sources[0]) + } + if len(localSources) == 1 { + item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]] + if item.IsDir || (item.IsArchive && extract) { + destMustBeDirectory = true + } + if item.IsRegular { + destCanBeFile = true + } + } + } + + // We care if the destination either doesn't exist, or exists and is a + // file. If the source can be a single file, for those cases we treat + // the destination as a file rather than as a directory tree. 
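+ // renameTarget, when non-empty, is the base name that a single incoming item will be renamed to after being extracted into the destination's parent directory; it stays empty when the destination is treated as a directory.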
+ renameTarget := "" + extractDirectory := filepath.Join(mountPoint, destination) + statOptions := copier.StatOptions{ + CheckForArchives: extract, + } + destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory}) + if err != nil { + return errors.Wrapf(err, "error checking on destination %v", extractDirectory) + } + if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile { + // destination doesn't exist - extract to parent and rename the incoming file to the destination's name + renameTarget = filepath.Base(extractDirectory) + extractDirectory = filepath.Dir(extractDirectory) + } + + // if the destination is a directory that doesn't yet exist, let's copy it. + newDestDirFound := false + if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile { + newDestDirFound = true + } + + if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular { + if destMustBeDirectory { + return errors.Errorf("destination %v already exists but is not a directory", destination) + } + // destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name + renameTarget = filepath.Base(extractDirectory) + extractDirectory = filepath.Dir(extractDirectory) + } + + pm, err := fileutils.NewPatternMatcher(options.Excludes) + if err != nil { + return errors.Wrapf(err, "error processing excludes list %v", options.Excludes) + } + + // Make sure that, if it's a symlink, we'll chroot to the target of the link; + // knowing that target requires that we resolve it within the chroot. + evalOptions := copier.EvalOptions{} + evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions) + if err != nil { + return errors.Wrapf(err, "error checking on destination %v", extractDirectory) + } + extractDirectory = evaluated + + // Set up ID maps. + var srcUIDMap, srcGIDMap []idtools.IDMap + if options.IDMappingOptions != nil { + srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap) + } + destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + + // Create the target directory if it doesn't exist yet. + mkdirOptions := copier.MkdirOptions{ + UIDMap: destUIDMap, + GIDMap: destGIDMap, + ChownNew: chownDirs, + } + if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil { + return errors.Wrapf(err, "error ensuring target directory exists") + } + + // Copy each source in turn. 
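+ // Each source is streamed through an io.Pipe: a producer goroutine writes a tar stream to the pipe (getURL for remote sources, copier.Get for local globs), while a consumer goroutine hashes the stream and, unless this is a dry run, hands it to copier.Put to extract it under extractDirectory.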
+ for _, src := range sources { + var multiErr *multierror.Error + var getErr, closeErr, renameErr, putErr error + var wg sync.WaitGroup + if sourceIsRemote(src) { + pipeReader, pipeWriter := io.Pipe() + wg.Add(1) + go func() { + getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles) + pipeWriter.Close() + wg.Done() + }() + wg.Add(1) + go func() { + b.ContentDigester.Start("") + hashCloser := b.ContentDigester.Hash() + hasher := io.Writer(hashCloser) + if options.Hasher != nil { + hasher = io.MultiWriter(hasher, options.Hasher) + } + if options.DryRun { + _, putErr = io.Copy(hasher, pipeReader) + } else { + putOptions := copier.PutOptions{ + UIDMap: destUIDMap, + GIDMap: destGIDMap, + ChownDirs: nil, + ChmodDirs: nil, + ChownFiles: nil, + ChmodFiles: nil, + IgnoreDevices: userns.RunningInUserNS(), + } + putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) + } + hashCloser.Close() + pipeReader.Close() + wg.Done() + }() + wg.Wait() + if getErr != nil { + getErr = errors.Wrapf(getErr, "error reading %q", src) + } + if putErr != nil { + putErr = errors.Wrapf(putErr, "error storing %q", src) + } + multiErr = multierror.Append(getErr, putErr) + if multiErr != nil && multiErr.ErrorOrNil() != nil { + if len(multiErr.Errors) > 1 { + return multiErr.ErrorOrNil() + } + return multiErr.Errors[0] + } + continue + } + + // Dig out the result of running glob+stat on this source spec. + var localSourceStat *copier.StatsForGlob + for _, st := range localSourceStats { + if st.Glob == src { + localSourceStat = st + break + } + } + if localSourceStat == nil { + continue + } + + // Iterate through every item that matched the glob. + itemsCopied := 0 + for _, glob := range localSourceStat.Globbed { + rel, err := filepath.Rel(contextDir, glob) + if err != nil { + return errors.Wrapf(err, "error computing path of %q relative to %q", glob, contextDir) + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir) + } + // Check for dockerignore-style exclusion of this item. + if rel != "." { + excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck + if err != nil { + return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel) + } + if excluded { + // non-directories that are excluded are excluded, no question, but + // directories can only be skipped if we don't have to allow for the + // possibility of finding things to include under them + globInfo := localSourceStat.Results[glob] + if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) { + continue + } + } else { + // if the destination is a directory that doesn't yet exist, and is not excluded, let's copy it. + if newDestDirFound { + itemsCopied++ + } + } + } else { + // Make sure we don't trigger a "copied nothing" error for an empty context + // directory if we were told to copy the context directory itself. We won't + // actually copy it, but we need to make sure that we don't produce an error + // due to potentially not having anything in the tarstream that we passed. 
+ itemsCopied++ + } + st := localSourceStat.Results[glob] + pipeReader, pipeWriter := io.Pipe() + wg.Add(1) + go func() { + renamedItems := 0 + writer := io.WriteCloser(pipeWriter) + if renameTarget != "" { + writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) { + hdr.Name = renameTarget + renamedItems++ + return false, false, nil + }) + } + writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) { + itemsCopied++ + return false, false, nil + }) + getOptions := copier.GetOptions{ + UIDMap: srcUIDMap, + GIDMap: srcGIDMap, + Excludes: options.Excludes, + ExpandArchives: extract, + ChownDirs: chownDirs, + ChmodDirs: chmodDirsFiles, + ChownFiles: chownFiles, + ChmodFiles: chmodDirsFiles, + StripSetuidBit: options.StripSetuidBit, + StripSetgidBit: options.StripSetgidBit, + StripStickyBit: options.StripStickyBit, + } + getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer) + closeErr = writer.Close() + if renameTarget != "" && renamedItems > 1 { + renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems) + } + wg.Done() + }() + wg.Add(1) + go func() { + if st.IsDir { + b.ContentDigester.Start("dir") + } else { + b.ContentDigester.Start("file") + } + hashCloser := b.ContentDigester.Hash() + hasher := io.Writer(hashCloser) + if options.Hasher != nil { + hasher = io.MultiWriter(hasher, options.Hasher) + } + if options.DryRun { + _, putErr = io.Copy(hasher, pipeReader) + } else { + putOptions := copier.PutOptions{ + UIDMap: destUIDMap, + GIDMap: destGIDMap, + DefaultDirOwner: chownDirs, + DefaultDirMode: nil, + ChownDirs: nil, + ChmodDirs: nil, + ChownFiles: nil, + ChmodFiles: nil, + IgnoreDevices: userns.RunningInUserNS(), + } + putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) + } + hashCloser.Close() + pipeReader.Close() + wg.Done() + }() + wg.Wait() + if getErr != nil { + getErr = errors.Wrapf(getErr, "error reading %q", src) + } + if closeErr != nil { + closeErr = errors.Wrapf(closeErr, "error closing %q", src) + } + if renameErr != nil { + renameErr = errors.Wrapf(renameErr, "error renaming %q", src) + } + if putErr != nil { + putErr = errors.Wrapf(putErr, "error storing %q", src) + } + multiErr = multierror.Append(getErr, closeErr, renameErr, putErr) + if multiErr != nil && multiErr.ErrorOrNil() != nil { + if len(multiErr.Errors) > 1 { + return multiErr.ErrorOrNil() + } + return multiErr.Errors[0] + } + } + if itemsCopied == 0 { + excludesFile := "" + if options.IgnoreFile != "" { + excludesFile = " using " + options.IgnoreFile + } + return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile) + } + } + return nil +} + +// userForRun returns the user (and group) information which we should use for +// running commands +func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, string, error) { + if userspec == "" { + userspec = b.User() + } + + uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec) + u := specs.User{ + UID: uid, + GID: gid, + Username: userspec, + } + if !strings.Contains(userspec, ":") { + groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) + if err2 != nil { + if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { + err = err2 + } + } else { + u.AdditionalGids = groups + } + + } + return u, homeDir, err +} + +// userForCopy returns the 
user (and group) information which we should use for +// setting ownership of contents being copied. It's just like what +// userForRun() does, except for the case where we're passed a single numeric +// value, where we need to use that value for both the UID and the GID. +func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint32, error) { + var ( + user, group string + uid, gid uint64 + err error + ) + + split := strings.SplitN(userspec, ":", 2) + user = split[0] + if len(split) > 1 { + group = split[1] + } + + // If userspec did not specify any values for user or group, then fail + if user == "" && group == "" { + return 0, 0, errors.Errorf("can't find uid for user %s", userspec) + } + + // If userspec specifies values for user or group, check for numeric values + // and return early. If not, then translate username/groupname + if user != "" { + uid, err = strconv.ParseUint(user, 10, 32) + } + if err == nil { + // default gid to uid + gid = uid + if group != "" { + gid, err = strconv.ParseUint(group, 10, 32) + } + } + // If err != nil, then user or group not numeric, check filesystem + if err == nil { + return uint32(uid), uint32(gid), nil + } + + owner, _, err := b.userForRun(mountPoint, userspec) + if err != nil { + return 0xffffffff, 0xffffffff, err + } + return owner.UID, owner.GID, nil +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go new file mode 100644 index 00000000000..0e45d12c2c8 --- /dev/null +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -0,0 +1,299 @@ +// +build linux + +package bind + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/containers/buildah/util" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// SetupIntermediateMountNamespace creates a new mount namespace and bind +// mounts all bind-mount sources into a subdirectory of bundlePath that can +// only be reached by the root user of the container's user namespace, except +// for Mounts which include the NoBindOption option in their options list. The +// NoBindOption will then merely be removed. +func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) { + defer stripNoBindOption(spec) + + // We expect a root directory to be defined. + if spec.Root == nil { + return nil, errors.Errorf("configuration has no root filesystem?") + } + rootPath := spec.Root.Path + + // Create a new mount namespace in which to do the things we're doing. + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return nil, errors.Wrapf(err, "error creating new mount namespace for %v", spec.Process.Args) + } + + // Make all of our mounts private to our namespace. + if err := mount.MakeRPrivate("/"); err != nil { + return nil, errors.Wrapf(err, "error making mounts private to mount namespace for %v", spec.Process.Args) + } + + // Make sure the bundle directory is searchable. We created it with + // TempDir(), so it should have started with permissions set to 0700. 
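+	// Note: OR-ing the mode with 0111 below only adds the search (execute)
+	// bits for user, group, and other, so users outside our namespace can
+	// traverse into the bundle directory without gaining read access to
+	// its contents.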
+ info, err := os.Stat(bundlePath) + if err != nil { + return nil, errors.Wrapf(err, "error checking permissions on %q", bundlePath) + } + if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil { + return nil, errors.Wrapf(err, "error loosening permissions on %q", bundlePath) + } + + // Figure out who needs to be able to reach these bind mounts in order + // for the container to be started. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return nil, err + } + + // Hand back a callback that the caller can use to clean up everything + // we're doing here. + unmount := []string{} + unmountAll = func() (err error) { + for _, mountpoint := range unmount { + // Unmount it and anything under it. + if err2 := UnmountMountpoints(mountpoint, nil); err2 != nil { + logrus.Warnf("pkg/bind: error unmounting %q: %v", mountpoint, err2) + if err == nil { + err = err2 + } + } + if err2 := unix.Unmount(mountpoint, unix.MNT_DETACH); err2 != nil { + if errno, ok := err2.(syscall.Errno); !ok || errno != syscall.EINVAL { + logrus.Warnf("pkg/bind: error detaching %q: %v", mountpoint, err2) + if err == nil { + err = err2 + } + } + } + // Remove just the mountpoint. + retry := 10 + remove := unix.Unlink + err2 := remove(mountpoint) + for err2 != nil && retry > 0 { + if errno, ok := err2.(syscall.Errno); ok { + switch errno { + default: + retry = 0 + continue + case syscall.EISDIR: + remove = unix.Rmdir + err2 = remove(mountpoint) + case syscall.EBUSY: + if err3 := unix.Unmount(mountpoint, unix.MNT_DETACH); err3 == nil { + err2 = remove(mountpoint) + } + } + retry-- + } + } + if err2 != nil { + logrus.Warnf("pkg/bind: error removing %q: %v", mountpoint, err2) + if err == nil { + err = err2 + } + } + } + return err + } + + // Create a top-level directory that the "root" user will be able to + // access, that "root" from containers which use different mappings, or + // other unprivileged users outside of containers, shouldn't be able to + // access. + mnt := filepath.Join(bundlePath, "mnt") + if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil { + return unmountAll, errors.Wrapf(err, "error creating %q owned by the container's root user", mnt) + } + + // Make that directory private, and add it to the list of locations we + // unmount at cleanup time. + if err = mount.MakeRPrivate(mnt); err != nil { + return unmountAll, errors.Wrapf(err, "error marking filesystem at %q as private", mnt) + } + unmount = append([]string{mnt}, unmount...) + + // Create a bind mount for the root filesystem and add it to the list. + rootfs := filepath.Join(mnt, "rootfs") + if err = os.Mkdir(rootfs, 0000); err != nil { + return unmountAll, errors.Wrapf(err, "error creating directory %q", rootfs) + } + if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { + return unmountAll, errors.Wrapf(err, "error bind mounting root filesystem from %q to %q", rootPath, rootfs) + } + logrus.Debugf("bind mounted %q to %q", rootPath, rootfs) + unmount = append([]string{rootfs}, unmount...) + spec.Root.Path = rootfs + + // Do the same for everything we're binding in. + mounts := make([]specs.Mount, 0, len(spec.Mounts)) + for i := range spec.Mounts { + // If we're not using an intermediate, leave it in the list. + if leaveBindMountAlone(spec.Mounts[i]) { + mounts = append(mounts, spec.Mounts[i]) + continue + } + // Check if the source is a directory or something else. 
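+		// The staging target we create below has to match the source's
+		// type: bind mounts onto directories need a directory target,
+		// while anything else needs a plain file as the target.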
+ info, err := os.Stat(spec.Mounts[i].Source) + if err != nil { + if os.IsNotExist(err) { + logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source) + continue + } + return unmountAll, errors.Wrapf(err, "error checking if %q is a directory", spec.Mounts[i].Source) + } + stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i)) + if info.IsDir() { + // If the source is a directory, make one to use as the + // mount target. + if err = os.Mkdir(stage, 0000); err != nil { + return unmountAll, errors.Wrapf(err, "error creating directory %q", stage) + } + } else { + // If the source is not a directory, create an empty + // file to use as the mount target. + file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000) + if err != nil { + return unmountAll, errors.Wrapf(err, "error creating file %q", stage) + } + file.Close() + } + // Bind mount the source from wherever it is to a place where + // we know the runtime helper will be able to get to it... + if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { + return unmountAll, errors.Wrapf(err, "error bind mounting bind object from %q to %q", spec.Mounts[i].Source, stage) + } + logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage) + spec.Mounts[i].Source = stage + // ... and update the source location that we'll pass to the + // runtime to our intermediate location. + mounts = append(mounts, spec.Mounts[i]) + unmount = append([]string{stage}, unmount...) + } + spec.Mounts = mounts + + return unmountAll, nil +} + +// Decide if the mount should not be redirected to an intermediate location first. +func leaveBindMountAlone(mount specs.Mount) bool { + // If we know we shouldn't do a redirection for this mount, skip it. + if util.StringInSlice(NoBindOption, mount.Options) { + return true + } + // If we're not bind mounting it in, we don't need to do anything for it. + if mount.Type != "bind" && !util.StringInSlice("bind", mount.Options) && !util.StringInSlice("rbind", mount.Options) { + return true + } + return false +} + +// UnmountMountpoints unmounts the given mountpoints and anything that's hanging +// off of them, rather aggressively. If a mountpoint also appears in the +// mountpointsToRemove slice, the mountpoints are removed after they are +// unmounted. +func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { + mounts, err := mount.GetMounts() + if err != nil { + return errors.Wrapf(err, "error retrieving list of mounts") + } + // getChildren returns the list of mount IDs that hang off of the + // specified ID. + getChildren := func(id int) []int { + var list []int + for _, info := range mounts { + if info.Parent == id { + list = append(list, info.ID) + } + } + return list + } + // getTree returns the list of mount IDs that hang off of the specified + // ID, and off of those mount IDs, etc. + getTree := func(id int) []int { + mounts := []int{id} + i := 0 + for i < len(mounts) { + children := getChildren(mounts[i]) + mounts = append(mounts, children...) 
+ i++ + } + return mounts + } + // getMountByID looks up the mount info with the specified ID + getMountByID := func(id int) *mount.Info { + for i := range mounts { + if mounts[i].ID == id { + return mounts[i] + } + } + return nil + } + // getMountByPoint looks up the mount info with the specified mountpoint + getMountByPoint := func(mountpoint string) *mount.Info { + for i := range mounts { + if mounts[i].Mountpoint == mountpoint { + return mounts[i] + } + } + return nil + } + // find the top of the tree we're unmounting + top := getMountByPoint(mountpoint) + if top == nil { + return errors.Wrapf(err, "%q is not mounted", mountpoint) + } + // add all of the mounts that are hanging off of it + tree := getTree(top.ID) + // unmount each mountpoint, working from the end of the list (leaf nodes) to the top + for i := range tree { + var st unix.Stat_t + id := tree[len(tree)-i-1] + mount := getMountByID(id) + // check if this mountpoint is mounted + if err := unix.Lstat(mount.Mountpoint, &st); err != nil { + if os.IsNotExist(err) { + logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint) + continue + } + return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) + } + if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations) + logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) + continue + } + // do the unmount + if err := unix.Unmount(mount.Mountpoint, 0); err != nil { + // if it was busy, detach it + if errno, ok := err.(syscall.Errno); ok && errno == syscall.EBUSY { + err = unix.Unmount(mount.Mountpoint, unix.MNT_DETACH) + } + if err != nil { + // if it was invalid (not mounted), hide the error, else return it + if errno, ok := err.(syscall.Errno); !ok || errno != syscall.EINVAL { + logrus.Warnf("error unmounting %q: %v", mount.Mountpoint, err) + continue + } + } + } + // if we're also supposed to remove this thing, do that, too + if util.StringInSlice(mount.Mountpoint, mountpointsToRemove) { + if err := os.Remove(mount.Mountpoint); err != nil { + return errors.Wrapf(err, "error removing %q", mount.Mountpoint) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/bind/mount_unsupported.go b/vendor/github.com/containers/buildah/bind/mount_unsupported.go new file mode 100644 index 00000000000..88ca2ca8b8a --- /dev/null +++ b/vendor/github.com/containers/buildah/bind/mount_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package bind + +import ( + "github.com/opencontainers/runtime-spec/specs-go" +) + +// SetupIntermediateMountNamespace returns a no-op unmountAll() and no error. +func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) { + stripNoBindOption(spec) + return func() error { return nil }, nil +} diff --git a/vendor/github.com/containers/buildah/bind/util.go b/vendor/github.com/containers/buildah/bind/util.go new file mode 100644 index 00000000000..5115368d774 --- /dev/null +++ b/vendor/github.com/containers/buildah/bind/util.go @@ -0,0 +1,27 @@ +package bind + +import ( + "github.com/containers/buildah/util" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // NoBindOption is an option which, if present in a Mount structure's + // options list, will cause SetupIntermediateMountNamespace to not + // redirect it through a bind mount. 
+	NoBindOption = "nobuildahbind"
+)
+
+func stripNoBindOption(spec *specs.Spec) {
+	for i := range spec.Mounts {
+		if util.StringInSlice(NoBindOption, spec.Mounts[i].Options) {
+			prunedOptions := make([]string, 0, len(spec.Mounts[i].Options))
+			for _, option := range spec.Mounts[i].Options {
+				if option != NoBindOption {
+					prunedOptions = append(prunedOptions, option)
+				}
+			}
+			spec.Mounts[i].Options = prunedOptions
+		}
+	}
+}
diff --git a/vendor/github.com/containers/buildah/btrfs_installed_tag.sh b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
new file mode 100644
index 00000000000..f2f2b33c83a
--- /dev/null
+++ b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
+#include <btrfs/ioctl.h>
+EOF
+if test $? -ne 0 ; then
+	echo exclude_graphdriver_btrfs
+fi
diff --git a/vendor/github.com/containers/buildah/btrfs_tag.sh b/vendor/github.com/containers/buildah/btrfs_tag.sh
new file mode 100644
index 00000000000..ea753d4d02a
--- /dev/null
+++ b/vendor/github.com/containers/buildah/btrfs_tag.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
+#include <btrfs/version.h>
+EOF
+if test $? -ne 0 ; then
+	echo btrfs_noversion
+fi
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
new file mode 100644
index 00000000000..8a850e90866
--- /dev/null
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -0,0 +1,526 @@
+package buildah
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"time"
+
+	"github.com/containers/buildah/define"
+	"github.com/containers/buildah/docker"
+	nettypes "github.com/containers/common/libnetwork/types"
+	"github.com/containers/image/v5/types"
+	encconfig "github.com/containers/ocicrypt/config"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/ioutils"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// Package is the name of this package, used in help output and to
+	// identify working containers.
+	Package = define.Package
+	// Version for the Package. Bump version in contrib/rpm/buildah.spec
+	// too.
+	Version = define.Version
+	// The value we use to identify what type of information, currently a
+	// serialized Builder structure, we are using as per-container state.
+	// This should only be changed when we make incompatible changes to
+	// that data structure, as it's used to distinguish containers which
+	// are "ours" from ones that aren't.
+	containerType = Package + " 0.0.1"
+	// The file in the per-container directory which we use to store our
+	// per-container state. If it isn't there, then the container isn't
+	// one of our build containers.
+	stateFile = Package + ".json"
+)
+
+// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+type PullPolicy = define.PullPolicy
+
+const (
+	// PullIfMissing is one of the values that BuilderOptions.PullPolicy
+	// can take, signalling that the source image should be pulled from a
+	// registry if a local copy of it is not already present.
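+	// (As vendored here, PullIfMissing appears to also be the zero value
+	// of define.PullPolicy, making it the effective default when callers
+	// leave BuilderOptions.PullPolicy unset.)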
+ PullIfMissing = define.PullIfMissing + // PullAlways is one of the values that BuilderOptions.PullPolicy can + // take, signalling that a fresh, possibly updated, copy of the image + // should be pulled from a registry before the build proceeds. + PullAlways = define.PullAlways + // PullIfNewer is one of the values that BuilderOptions.PullPolicy + // can take, signalling that the source image should only be pulled + // from a registry if a local copy is not already present or if a + // newer version the image is present on the repository. + PullIfNewer = define.PullIfNewer + // PullNever is one of the values that BuilderOptions.PullPolicy can + // take, signalling that the source image should not be pulled from a + // registry if a local copy of it is not already present. + PullNever = define.PullNever +) + +// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled, +// or NetworkEnabled. +type NetworkConfigurationPolicy = define.NetworkConfigurationPolicy + +const ( + // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that the default behavior should be used. + NetworkDefault = define.NetworkDefault + // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that network interfaces should NOT be configured for + // newly-created network namespaces. + NetworkDisabled = define.NetworkDisabled + // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that network interfaces should be configured for + // newly-created network namespaces. + NetworkEnabled = define.NetworkEnabled +) + +// Builder objects are used to represent containers which are being used to +// build images. They also carry potential updates which will be applied to +// the image's configuration when the container's contents are used to build an +// image. +type Builder struct { + store storage.Store + + // Logger is the logrus logger to write log messages with + Logger *logrus.Logger `json:"-"` + + // Args define variables that users can pass at build-time to the builder + Args map[string]string + // Type is used to help identify a build container's metadata. It + // should not be modified. + Type string `json:"type"` + // FromImage is the name of the source image which was used to create + // the container, if one was used. It should not be modified. + FromImage string `json:"image,omitempty"` + // FromImageID is the ID of the source image which was used to create + // the container, if one was used. It should not be modified. + FromImageID string `json:"image-id"` + // FromImageDigest is the digest of the source image which was used to + // create the container, if one was used. It should not be modified. + FromImageDigest string `json:"image-digest"` + // Config is the source image's configuration. It should not be + // modified. + Config []byte `json:"config,omitempty"` + // Manifest is the source image's manifest. It should not be modified. + Manifest []byte `json:"manifest,omitempty"` + + // Container is the name of the build container. It should not be modified. + Container string `json:"container-name,omitempty"` + // ContainerID is the ID of the build container. It should not be modified. + ContainerID string `json:"container-id,omitempty"` + // MountPoint is the last location where the container's root + // filesystem was mounted. It should not be modified. 
+ MountPoint string `json:"mountpoint,omitempty"` + // ProcessLabel is the SELinux process label associated with the container + ProcessLabel string `json:"process-label,omitempty"` + // MountLabel is the SELinux mount label associated with the container + MountLabel string `json:"mount-label,omitempty"` + + // ImageAnnotations is a set of key-value pairs which is stored in the + // image's manifest. + ImageAnnotations map[string]string `json:"annotations,omitempty"` + // ImageCreatedBy is a description of how this container was built. + ImageCreatedBy string `json:"created-by,omitempty"` + // ImageHistoryComment is a description of how our added layers were built. + ImageHistoryComment string `json:"history-comment,omitempty"` + + // Image metadata and runtime settings, in multiple formats. + OCIv1 v1.Image `json:"ociv1,omitempty"` + Docker docker.V2Image `json:"docker,omitempty"` + // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format. + DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"` + + // Isolation controls how we handle "RUN" statements and the Run() method. + Isolation define.Isolation + // NamespaceOptions controls how we set up the namespaces for processes that we run in the container. + NamespaceOptions define.NamespaceOptions + // ConfigureNetwork controls whether or not network interfaces and + // routing are configured for a new network namespace (i.e., when not + // joining another's namespace and not just using the host's + // namespace), effectively deciding whether or not the process has a + // usable network. + ConfigureNetwork define.NetworkConfigurationPolicy + // CNIPluginPath is the location of CNI plugin helpers, if they should be + // run from a location other than the default location. + CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. + CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + + // ID mapping options to use when running processes in the container with non-host user namespaces. + IDMappingOptions define.IDMappingOptions + // Capabilities is a list of capabilities to use when running commands in the container. + Capabilities []string + // PrependedEmptyLayers are history entries that we'll add to a + // committed image, after any history items that we inherit from a base + // image, but before the history item for the layer that we're + // committing. + PrependedEmptyLayers []v1.History + // AppendedEmptyLayers are history entries that we'll add to a + // committed image after the history item for the layer that we're + // committing. 
+ AppendedEmptyLayers []v1.History + CommonBuildOpts *define.CommonBuildOptions + // TopLayer is the top layer of the image + TopLayer string + // Format for the build Image + Format string + // TempVolumes are temporary mount points created during container runs + TempVolumes map[string]bool + // ContentDigester counts the digest of all Add()ed content + ContentDigester CompositeDigester + // Devices are the additional devices to add to the containers + Devices define.ContainerDevices +} + +// BuilderInfo are used as objects to display container information +type BuilderInfo struct { + Type string + FromImage string + FromImageID string + FromImageDigest string + Config string + Manifest string + Container string + ContainerID string + MountPoint string + ProcessLabel string + MountLabel string + ImageAnnotations map[string]string + ImageCreatedBy string + OCIv1 v1.Image + Docker docker.V2Image + DefaultMountsFilePath string + Isolation string + NamespaceOptions define.NamespaceOptions + Capabilities []string + ConfigureNetwork string + CNIPluginPath string + CNIConfigDir string + IDMappingOptions define.IDMappingOptions + History []v1.History + Devices define.ContainerDevices +} + +// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it. +// This is used in the inspect command to display Manifest and Config as string and not []byte. +func GetBuildInfo(b *Builder) BuilderInfo { + history := copyHistory(b.OCIv1.History) + history = append(history, copyHistory(b.PrependedEmptyLayers)...) + history = append(history, copyHistory(b.AppendedEmptyLayers)...) + sort.Strings(b.Capabilities) + return BuilderInfo{ + Type: b.Type, + FromImage: b.FromImage, + FromImageID: b.FromImageID, + FromImageDigest: b.FromImageDigest, + Config: string(b.Config), + Manifest: string(b.Manifest), + Container: b.Container, + ContainerID: b.ContainerID, + MountPoint: b.MountPoint, + ProcessLabel: b.ProcessLabel, + MountLabel: b.MountLabel, + ImageAnnotations: b.ImageAnnotations, + ImageCreatedBy: b.ImageCreatedBy, + OCIv1: b.OCIv1, + Docker: b.Docker, + DefaultMountsFilePath: b.DefaultMountsFilePath, + Isolation: b.Isolation.String(), + NamespaceOptions: b.NamespaceOptions, + ConfigureNetwork: fmt.Sprintf("%v", b.ConfigureNetwork), + CNIPluginPath: b.CNIPluginPath, + CNIConfigDir: b.CNIConfigDir, + IDMappingOptions: b.IDMappingOptions, + Capabilities: b.Capabilities, + History: history, + Devices: b.Devices, + } +} + +// CommonBuildOptions are resources that can be defined by flags for both buildah from and build +type CommonBuildOptions = define.CommonBuildOptions + +// BuilderOptions are used to initialize a new Builder. +type BuilderOptions struct { + // Args define variables that users can pass at build-time to the builder + Args map[string]string + // FromImage is the name of the image which should be used as the + // starting point for the container. It can be set to an empty value + // or "scratch" to indicate that the container should not be based on + // an image. + FromImage string + // ContainerSuffix is the suffix to add for generated container names + ContainerSuffix string + // Container is a desired name for the build container. + Container string + // PullPolicy decides whether or not we should pull the image that + // we're using as a base image. It should be PullIfMissing, + // PullAlways, or PullNever. 
+ PullPolicy define.PullPolicy + // Registry is a value which is prepended to the image's name, if it + // needs to be pulled and the image name alone can not be resolved to a + // reference to a source image. No separator is implicitly added. + Registry string + // BlobDirectory is the name of a directory in which we'll attempt + // to store copies of layer blobs that we pull down, if any. It should + // already exist. + BlobDirectory string + // Logger is the logrus logger to write log messages with + Logger *logrus.Logger `json:"-"` + // Mount signals to NewBuilder() that the container should be mounted + // immediately. + Mount bool + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to log the reading + // of the source image from a registry, if we end up pulling the image. + ReportWriter io.Writer + // github.com/containers/image/types SystemContext to hold credentials + // and other authentication/authorization information. + SystemContext *types.SystemContext + // DefaultMountsFilePath is the file path holding the mounts to be + // mounted in "host-path:container-path" format + DefaultMountsFilePath string + // Isolation controls how we handle "RUN" statements and the Run() + // method. + Isolation define.Isolation + // NamespaceOptions controls how we set up namespaces for processes that + // we might need to run using the container's root filesystem. + NamespaceOptions define.NamespaceOptions + // ConfigureNetwork controls whether or not network interfaces and + // routing are configured for a new network namespace (i.e., when not + // joining another's namespace and not just using the host's + // namespace), effectively deciding whether or not the process has a + // usable network. + ConfigureNetwork define.NetworkConfigurationPolicy + // CNIPluginPath is the location of CNI plugin helpers, if they should be + // run from a location other than the default location. + CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. + CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + + // ID mapping options to use if we're setting up our own user namespace. + IDMappingOptions *define.IDMappingOptions + // Capabilities is a list of capabilities to use when + // running commands in the container. + Capabilities []string + CommonBuildOpts *define.CommonBuildOptions + // Format for the container image + Format string + // Devices are the additional devices to add to the containers + Devices define.ContainerDevices + //DefaultEnv for containers + DefaultEnv []string + // MaxPullRetries is the maximum number of attempts we'll make to pull + // any one image from the external registry if the first attempt fails. + MaxPullRetries int + // PullRetryDelay is how long to wait before retrying a pull attempt. + PullRetryDelay time.Duration + // OciDecryptConfig contains the config that can be used to decrypt an image if it is + // encrypted if non-nil. If nil, it does not attempt to decrypt an image. 
+ OciDecryptConfig *encconfig.DecryptConfig + // ProcessLabel is the SELinux process label associated with the container + ProcessLabel string + // MountLabel is the SELinux mount label associated with the container + MountLabel string +} + +// ImportOptions are used to initialize a Builder from an existing container +// which was created elsewhere. +type ImportOptions struct { + // Container is the name of the build container. + Container string + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string +} + +// ImportFromImageOptions are used to initialize a Builder from an image. +type ImportFromImageOptions struct { + // Image is the name or ID of the image we'd like to examine. + Image string + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // github.com/containers/image/types SystemContext to hold information + // about which registries we should check for completing image names + // that don't include a domain portion. + SystemContext *types.SystemContext +} + +// NewBuilder creates a new build container. +func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { + if options.CommonBuildOpts == nil { + options.CommonBuildOpts = &CommonBuildOptions{} + } + return newBuilder(ctx, store, options) +} + +// ImportBuilder creates a new build configuration using an already-present +// container. +func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) { + return importBuilder(ctx, store, options) +} + +// ImportBuilderFromImage creates a new builder configuration using an image. +// The returned object can be modified and examined, but it can not be saved +// or committed because it is not associated with a working container. +func ImportBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) { + return importBuilderFromImage(ctx, store, options) +} + +// OpenBuilder loads information about a build container given its name or ID. 
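+//
+// A minimal usage sketch (hypothetical; it assumes an initialized
+// storage.Store named store and a working container named
+// "working-container" created earlier by NewBuilder):
+//
+//	b, err := buildah.OpenBuilder(store, "working-container")
+//	if err != nil {
+//		return err
+//	}
+//	logrus.Debugf("%s is based on %s", b.Container, b.FromImage)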
+func OpenBuilder(store storage.Store, container string) (*Builder, error) { + cdir, err := store.ContainerDirectory(container) + if err != nil { + return nil, err + } + buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) + if err != nil { + return nil, err + } + b := &Builder{} + if err = json.Unmarshal(buildstate, &b); err != nil { + return nil, errors.Wrapf(err, "error parsing %q, read from %q", string(buildstate), filepath.Join(cdir, stateFile)) + } + if b.Type != containerType { + return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type) + } + + netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath) + if err != nil { + return nil, err + } + b.NetworkInterface = netInt + b.store = store + b.fixupConfig(nil) + b.setupLogger() + return b, nil +} + +// OpenBuilderByPath loads information about a build container given a +// path to the container's root filesystem +func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { + containers, err := store.Containers() + if err != nil { + return nil, err + } + abs, err := filepath.Abs(path) + if err != nil { + return nil, err + } + builderMatchesPath := func(b *Builder, path string) bool { + return (b.MountPoint == path) + } + for _, container := range containers { + cdir, err := store.ContainerDirectory(container.ID) + if err != nil { + return nil, err + } + buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) + if err != nil { + if os.IsNotExist(err) { + logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID) + continue + } + return nil, err + } + b := &Builder{} + err = json.Unmarshal(buildstate, &b) + if err == nil && b.Type == containerType && builderMatchesPath(b, abs) { + b.store = store + b.fixupConfig(nil) + b.setupLogger() + return b, nil + } + if err != nil { + logrus.Debugf("error parsing %q, read from %q: %v", string(buildstate), filepath.Join(cdir, stateFile), err) + } else if b.Type != containerType { + logrus.Debugf("container %q is not a %s container (is a %q container)", container.ID, define.Package, b.Type) + } + } + return nil, storage.ErrContainerUnknown +} + +// OpenAllBuilders loads all containers which have a state file that we use in +// their data directory, typically so that they can be listed. +func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { + containers, err := store.Containers() + if err != nil { + return nil, err + } + for _, container := range containers { + cdir, err := store.ContainerDirectory(container.ID) + if err != nil { + return nil, err + } + buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) + if err != nil { + if os.IsNotExist(err) { + logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID) + continue + } + return nil, err + } + b := &Builder{} + err = json.Unmarshal(buildstate, &b) + if err == nil && b.Type == containerType { + b.store = store + b.setupLogger() + b.fixupConfig(nil) + builders = append(builders, b) + continue + } + if err != nil { + logrus.Debugf("error parsing %q, read from %q: %v", string(buildstate), filepath.Join(cdir, stateFile), err) + } else if b.Type != containerType { + logrus.Debugf("container %q is not a %s container (is a %q container)", container.ID, define.Package, b.Type) + } + } + return builders, nil +} + +// Save saves the builder's current state to the build container's metadata. 
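+// The state is marshaled to JSON and written atomically to the buildah.json
+// state file in the container's storage directory.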
+// This should not need to be called directly, as other methods of the Builder +// object take care of saving their state. +func (b *Builder) Save() error { + buildstate, err := json.Marshal(b) + if err != nil { + return err + } + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return err + } + if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil { + return errors.Wrapf(err, "error saving builder state to %q", filepath.Join(cdir, stateFile)) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt new file mode 100644 index 00000000000..909e4a9e79f --- /dev/null +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -0,0 +1,2300 @@ +- Changelog for v1.24.5 (2022-07-14) + * Bump github.com/containers/storage to v1.38.5 + * drop commas from changelog dates because `rpmspec -q` doesn't like them + +- Changelog for v1.24.4 (2022-05-11) + * vendor: bump c/image to v5.19.3 + * vendor: bump c/ocicrypt to v1.1.4 + * vendor: bump golang.org/x/crypto to 7b82a4e + * Resolves GHSA-8c26-wmh5-6g9v - CVE-2022-27191 + +- Changelog for v1.24.3 (2022-03-01) + * vendor: bump c/common to 0.47.5 + * Add a test for CVE-2022-27651 + * do not set the inheritable capabilities + * Bump github.com/prometheus/client_golang to v1.11.1 + * vendor: bump c/image to v5.19.2 + * vendor: bump c/storage to v1.38.3 + +- Changelog for v1.24.2 (2022-02-16) + * Increase subuid/subgid to 65535 + * history: only add proxy vars to history if specified + * run_linux: use --systemd-cgroup + * buildah: new global option --cgroup-manager + * Makefile: build with systemd when available + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + * Bump c/common to v0.47.4 + * Cirrus: Use updated VM images + * conformance: add a few "replace-directory-with-symlink" tests + * Bump back to v1.25.0-dev + +- Changelog for v1.24.1 (2022-02-03) + * executor: Add support for inline --platform within Dockerfile + * caps: fix buildah run --cap-add=all + * Update vendor of openshift/imagebuilder + * Bump version of containers/image and containers/common + * Update vendor of containers/common + * System tests: fix accidental vandalism of source dir + * build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + * imagebuildah.BuildDockerfiles(): create the jobs semaphore + * build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + * overlay: always honor mountProgram + * overlay: move mount program invocation to separate function + * overlay: move mount program lookup to separate function + * Bump to v1.25.0-dev [NO TESTS NEEDED] + +- Changelog for v1.24.0 (2022-01-26) + * Update vendor of containers/common + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Github-workflow: Report both failures and errors. 
+ * build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + * Update docs/buildah-build.1.md + * [CI:DOCS] Fix typos and improve language + * buildah bud --network add support for custom networks + * Make pull commands be consistent + * docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + * build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + * Vendor in latest containers/image + * Run codespell on code + * .github/dependabot.yml: add tests/tools go.mod + * CI: rm git-validation, add GHA job to validate PRs + * tests/tools: bump go-md2man to v2.0.1 + * tests/tools/Makefile: simplify + * tests/tools: bump onsi/ginkgo to v1.16.5 + * vendor: bump c/common and others + * mount: add support for custom upper and workdir with overlay mounts + * linux: fix lookup for runtime + * overlay: add MountWithOptions to API which extends support for advanced overlay + * Allow processing of SystemContext from FlagSet + * .golangci.yml: enable unparam linter + * util/resolveName: rm bool return + * tests/tools: bump golangci-lint + * .gitignore: fixups + * all: fix capabilities.NewPid deprecation warnings + * bind/mount.go: fix linter comment + * all: fix gosimple warning S1039 + * tests/e2e/buildah_suite_test.go: fix gosimple warnings + * imagebuildah/executor.go: fix gosimple warning + * util.go: fix gosimple warning + * build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + * Enable git-daemon tests + * Allow processing of id options from FlagSet + * Cirrus: Re-order tasks for more parallelism + * Cirrus: Freshen VM images + * Fix platform handling for empty os/arch values + * Allow processing of network options from FlagSet + * Fix permissions on secrets directory + * Update containers/image and containers/common + * bud.bats: use a local git daemon for the git protocol test + * Allow processing of common options from FlagSet + * Cirrus: Run int. tests in parallel with unit + * vendor c/common + * Fix default CNI paths + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + * multi-stage: enable mounting stages across each other with selinux enabled + * executor: Share selinux label of first stage with other stages in a build + * buildkit: add from field to bind and cache mounts so images can be used as source + * Use config.ProxyEnv from containers/common + * use libnetwork from c/common for networking + * setup the netns in the buildah parent process + * build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + * build: fix libsubid test + * Allow callers to replace the ContainerSuffix + * parse: allow parsing anomaly non-human value for memory control group + * .cirrus: remove static_build from ci + * stage_executor: re-use all possible layers from cache for squashed builds + * build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + * Allow rootless buildah to set resource limits on cgroup V2 + * build(deps): bump github.com/docker/docker + * tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + * build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + * Wire logger through to config + * copier.Put: check for is-not-a-directory using lstat, not stat + * Turn on rootless cgroupv2 tests + * Grab all of the containers.conf settings for namespaces. 
+ * image: set MediaType in OCI manifests + * copier: RemoveAll possibly-directories + * Simple README fix + * images: accept multiple filter with logical AND + * build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + * UPdate vendor of container/storage + * build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + * build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + * Make LocalIP public function so Podman can use it + * Fix UnsetEnv for buildah bud + * Tests should rely only on static/unchanging images + * run: ensure that stdio pipes are labeled correctly + * build(deps): bump github.com/docker/docker + * Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + * chroot: don't use the generate default seccomp filter for unit tests + * build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + * ssh-agent: Increase timeout before we explicitly close connection + * docs/tutorials: update + * Clarify that manifest defaults to localhost as the registry name + * "config": remove a stray bit of debug output + * "commit": fix a flag typo + * Fix an error message: unlocking vs locking + * Expand the godoc for CommonBuildOptions.Secrets + * chroot: accept an "rw" option + * Add --unsetenv option to buildah commit and build + * define.TempDirForURL(): show CombinedOutput when a command fails + * config: support the variant field + * rootless: do not bind mount /sys if not needed + * Fix tutorial to specify command on buildah run line + * build: history should not contain ARG values + * docs: Use guaranteed path for go-md2man + * run: honor --network=none from builder if nothing specified + * networkpolicy: Should be enabled instead of default when explictly set + * Add support for env var secret sources + * build(deps): bump github.com/docker/docker + * fix: another non-portable shebang + * Rootless containers users should use additional groups + * Support overlayfs path contains colon + * Report ignorefile location when no content added + * Add support for host.containers.internal in the /etc/hosts + * build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + * imagebuildah: fix nil deref + * buildkit: add support for mount=type=cache + * Default secret mode to 400 + * [CI:DOCS] Include manifest example usage + * docs: update buildah-from, buildah-pull 'platform' option compatibility notes + * docs: update buildah-build 'platform' option compatibility notes + * De-dockerize the man page as much as possible + * [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + * docs: Fix and Update Containerfile man page with supported mount types + * mount: add tmpcopyup to tmpfs mount option + * buildkit: Add support for --mount=type=tmpfs + * build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + * Fix command doc links in README.md + * build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + * build: Add support for buildkit like --mount=type=bind + * Bump containerd to v1.5.7 + * build(deps): bump github.com/docker/docker + * tests: stop pulling php, composer + * Fix .containerignore link file + * Cirrus: Fix defunct package metadata breaking cache + * build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + * buildah build: add --all-platforms + * Add man page for Containerfile and .containerignore + * Plumb the remote logger throughut Buildah + * Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + * Run: Cleanup run directory after every RUN step + 
* build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0 + * Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + * Makefile: check for `-race` using `-mod=vendor` + * imagebuildah: fix an attempt to write to a nil map + * push: support to specify the compression format + * conformance: allow test cases to specify dockerUseBuildKit + * build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + * build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + * unmarshalConvertedConfig(): handle zstd compression + * tests/copy/copy: wire up compression options + * Update to github.com/vbauerster/mpb v7.1.5 + * Add flouthoc to OWNERS + * build: Add additional step nodes when labels are modified + * Makefile: turn on race detection whenever it's available + * conformance: add more tests for exclusion short-circuiting + * Update VM Images + Drop prior-ubuntu testing + * Bump to v1.24.0-dev + +- Changelog for v1.23.0 (2021-09-13) + * Vendor in containers/common v0.44.0 + * build(deps): bump github.com/containers/storage from 1.35.0 to 1.36.0 + * Update 05-openshift-rootless-build.md + * build(deps): bump github.com/opencontainers/selinux from 1.8.4 to 1.8.5 + * .cirrus.yml: run cross_build_task on Big Sur + * Makefile: update cross targets + * Add support for rootless overlay mounts + * Cirrus: Increase unit-test timeout + * Docs: Clarify rmi w/ manifest/index use + * build: mirror --authfile to filesystem if pointing to FD instead of file + * Fix build with .git url with branch + * manifest: rm should remove only manifests not referenced images. + * vendor: bump c/common to v0.43.3-0.20210902095222-a7acc160fb25 + * Avoid rehashing and noop compression writer + * corrected man page section; .conf file to mention its man page + * copy: add --max-parallel-downloads to tune that copy option + * copier.Get(): try to avoid descending into directories + * tag: Support tagging manifest list instead of resolving to images + * Install new manpages to correct sections + * conformance: tighten up exception specifications + * Add support for libsubid + * Add epoch time field to buildah images + * Fix ownership of /home/build/.local/share/containers + * build(deps): bump github.com/containers/image/v5 from 5.15.2 to 5.16.0 + * Rename bud to build, while keeping an alias for to bud. + * Replace golang.org/x/crypto/ssh/terminal with golang.org/x/term + * build(deps): bump github.com/opencontainers/runc from 1.0.1 to 1.0.2 + * build(deps): bump github.com/onsi/gomega from 1.15.0 to 1.16.0 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.3 to 1.7.4 + * build(deps): bump github.com/containers/common from 0.43.1 to 0.43.2 + * Move DiscoverContainerfile to pkg/util directory + * build(deps): bump github.com/containers/image/v5 from 5.15.1 to 5.15.2 + * Remove some references to Docker + * build(deps): bump github.com/containers/image/v5 from 5.15.0 to 5.15.1 + * imagebuildah: handle --manifest directly + * build(deps): bump github.com/containers/common from 0.42.1 to 0.43.1 + * build(deps): bump github.com/opencontainers/selinux from 1.8.3 to 1.8.4 + * executor: make sure imageMap is updated with terminatedStage + * tests/serve/serve.go: use a kernel-assigned port + * Bump go for vendor-in-container from 1.13 to 1.16 + * imagebuildah: move multiple-platform building internal + * Adds GenerateStructure helper function to support rootfs-overlay. 
+ * Run codespell to fix spelling + * Implement SSH RUN mount + * build(deps): bump github.com/onsi/gomega from 1.14.0 to 1.15.0 + * Fix resolv.conf content with run --net=private + * run: fix nil deref using the option's logger + * build(deps): bump github.com/containerd/containerd from 1.5.1 to 1.5.5 + * make vendor-in-container + * bud: teach --platform to take a list + * set base-image annotations + * build(deps): bump github.com/opencontainers/selinux from 1.8.2 to 1.8.3 + * [CI:DOCS] Fix CHANGELOG.md + * Bump to v1.23.0-dev [NO TESTS NEEDED] + * Accept repositories on login/logout + +- Changelog for v1.22.0 (2021-08-02) + * c/image, c/storage, c/common vendor before Podman 3.3 release + * WIP: tests: new assert() + * Proposed patch for 3399 (shadowutils) + * Fix handling of --restore shadow-utils + * build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0 + * runtime-flag (debug) test: handle old & new runc + * build(deps): bump github.com/containers/storage from 1.32.6 to 1.33.0 + * Allow dst and destination for target in secret mounts + * Multi-arch: Always push updated version-tagged img + * Add a few tests on cgroups V2 + * imagebuildah.stageExecutor.prepare(): remove pseudonym check + * refine dangling filter + * Chown with environment variables not set should fail + * Just restore protections of shadow-utils + * build(deps): bump github.com/opencontainers/runc from 1.0.0 to 1.0.1 + * Remove specific kernel version number requirement from install.md + * Multi-arch image workflow: Make steps generic + * chroot: fix environment value leakage to intermediate processes + * Update nix pin with `make nixpkgs` + * buildah source - create and manage source images + * Update cirrus-cron notification GH workflow + * Reuse code from containers/common/pkg/parse + * Cirrus: Freshen VM images + * build(deps): bump github.com/containers/storage from 1.32.5 to 1.32.6 + * Fix excludes exception begining with / or ./ + * Fix syntax for --manifest example + * build(deps): bump github.com/onsi/gomega from 1.13.0 to 1.14.0 + * vendor containers/common@main + * Cirrus: Drop dependence on fedora-minimal + * Adjust conformance-test error-message regex + * Workaround appearance of differing debug messages + * Cirrus: Install docker from package cache + * build(deps): bump github.com/containers/ocicrypt from 1.1.1 to 1.1.2 + * Switch rusagelogfile to use options.Out + * build(deps): bump github.com/containers/storage from 1.32.4 to 1.32.5 + * Turn stdio back to blocking when command finishes + * Add support for default network creation + * Cirrus: Updates for master->main rename + * Change references from master to main + * Add `--env` and `--workingdir` flags to run command + * build(deps): bump github.com/opencontainers/runc + * [CI:DOCS] buildah bud: spelling --ignore-file requires parameter + * [CI:DOCS] push/pull: clarify supported transports + * Remove unused function arguments + * Create mountOptions for mount command flags + * Extract version command implementation to function + * Add --json flags to `mount` and `version` commands + * build(deps): bump github.com/containers/storage from 1.32.2 to 1.32.3 + * build(deps): bump github.com/containers/common from 0.40.0 to 0.40.1 + * copier.Put(): set xattrs after ownership + * buildah add/copy: spelling + * build(deps): bump github.com/containers/common from 0.39.0 to 0.40.0 + * buildah copy and buildah add should support .containerignore + * Remove unused util.StartsWithValidTransport + * Fix 
documentation of the --format option of buildah push + * Don't use alltransports.ParseImageName with known transports + * build(deps): bump github.com/containers/image/v5 from 5.13.0 to 5.13.1 + * man pages: clarify `rmi` removes dangling parents + * tests: make it easer to override the location of the copy helper + * build(deps): bump github.com/containers/image/v5 from 5.12.0 to 5.13.0 + * [CI:DOCS] Fix links to c/image master branch + * imagebuildah: use the specified logger for logging preprocessing warnings + * Fix copy into workdir for a single file + * Fix docs links due to branch rename + * Update nix pin with `make nixpkgs` + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.2 to 1.7.3 + * build(deps): bump github.com/opencontainers/selinux from 1.8.1 to 1.8.2 + * build(deps): bump go.etcd.io/bbolt from 1.3.5 to 1.3.6 + * build(deps): bump github.com/containers/storage from 1.32.1 to 1.32.2 + * build(deps): bump github.com/mattn/go-shellwords from 1.0.11 to 1.0.12 + * build(deps): bump github.com/onsi/ginkgo from 1.16.3 to 1.16.4 + * fix(docs): typo + * Move to v1.22.0-dev + * Fix handling of auth.json file while in a user namespace + * Add rusage-logfile flag to optionally send rusage to a file + * imagebuildah: redo step logging + * build(deps): bump github.com/onsi/ginkgo from 1.16.2 to 1.16.3 + * build(deps): bump github.com/containers/storage from 1.32.0 to 1.32.1 + * Add volumes to make running buildah within a container easier + * build(deps): bump github.com/onsi/gomega from 1.12.0 to 1.13.0 + * Add and use a "copy" helper instead of podman load/save + * Bump github.com/containers/common from 0.38.4 to 0.39.0 + * containerImageRef/containerImageSource: don't buffer uncompressed layers + * containerImageRef(): squashed images have no parent images + * Sync. workflow across skopeo, buildah, and podman + * Bump github.com/containers/storage from 1.31.1 to 1.31.2 + * Bump github.com/opencontainers/runc from 1.0.0-rc94 to 1.0.0-rc95 + * Bump to v1.21.1-dev [NO TESTS NEEDED] + +- Changelog for v1.21.0 (2021-05-19) + * Don't blow up if cpp detects errors + * Vendor in containers/common v0.38.4 + * Remove 'buildah run --security-opt' from completion + * update c/common + * Fix handling of --default-mounts-file + * update vendor of containers/storage v1.31.1 + * Bump github.com/containers/storage from 1.30.3 to 1.31.0 + * Send logrus messages back to caller when building + * github: Fix bad repo. 
ref in workflow config + * Check earlier for bad image tags name + * buildah bud: fix containers/podman/issues/10307 + * Bump github.com/containers/storage from 1.30.1 to 1.30.3 + * Cirrus: Support [CI:DOCS] test skipping + * Notification email for cirrus-cron build failures + * Bump github.com/opencontainers/runc from 1.0.0-rc93 to 1.0.0-rc94 + * Fix race condition + * Fix copy race while walking paths + * Preserve ownership of lower directory when doing an overlay mount + * Bump github.com/onsi/gomega from 1.11.0 to 1.12.0 + * Update nix pin with `make nixpkgs` + * codespell cleanup + * Multi-arch github-action workflow unification + * Bump github.com/containers/image/v5 from 5.11.1 to 5.12.0 + * Bump github.com/onsi/ginkgo from 1.16.1 to 1.16.2 + * imagebuildah: ignore signatures when tagging images + * update to latest libimage + * Bump github.com/containers/common from 0.37.0 to 0.37.1 + * Bump github.com/containers/storage from 1.30.0 to 1.30.1 + * Upgrade to GitHub-native Dependabot + * Document location of auth.json file if XDG_RUNTIME_DIR is not set + * run.bats: fix flake in run-user test + * Cirrus: Update F34beta -> F34 + * pr-should-include-tests: try to make work in buildah + * runUsingRuntime: when relaying error from the runtime, mention that + * Run(): avoid Mkdir() into the rootfs + * imagebuildah: replace archive with chrootarchive + * imagebuildah.StageExecutor.volumeCacheSaveVFS(): set up bind mounts + * conformance: use :Z with transient mounts when SELinux is enabled + * bud.bats: fix a bats warning + * imagebuildah: create volume directories when using overlays + * imagebuildah: drop resolveSymlink() + * namespaces test - refactoring and cleanup + * Refactor 'idmapping' system test + * Cirrus: Update Ubuntu images to 21.04 + * Tiny fixes in bud system tests + * Add compabitility wrappers for removed packages + * Fix expected message at pulling image + * Fix system tests of 'bud' subcommand + * [CI:DOCS] Update steps for CentOS runc users + * Add support for secret mounts + * Add buildah manifest rm command + * restore push/pull and util API + * [CI:DOCS] Remove older distro docs + * Rename rhel secrets to subscriptions + * vendor in openshift/imagebuilder + * Remove buildah bud --loglevel ... 
+ * use new containers/common/libimage package + * Fix copier when using globs + * Test namespace flags of 'bud' subcommand + * Add system test of 'bud' subcommand + * Output names of multiple tags in buildah bud + * push to docker test: don't get fooled by podman + * copier: add Remove() + * build(deps): bump github.com/containers/image/v5 from 5.10.5 to 5.11.1 + * Restore log timestamps + * Add system test of 'buildah help' with a tiny fix + * tests: copy.bats: fix infinite hang + * Do not force hard code to crun in rootless mode + * build(deps): bump github.com/openshift/imagebuilder from 1.2.0 to 1.2.1 + * build(deps): bump github.com/containers/ocicrypt from 1.1.0 to 1.1.1 + * build(deps): bump github.com/containers/common from 0.35.4 to 0.36.0 + * Fix arg missing warning in bud + * Check without flag in 'from --cgroup-parent' test + * Minor fixes to Buildah as a library tutorial documentation + * Add system test of 'buildah version' for packaged buildah + * Add a few system tests of 'buildah from' + * Log the final error with %+v at logging level "trace" + * copier: add GetOptions.NoCrossDevice + * Update nix pin with `make nixpkgs` + * Bump to v1.20.2-dev + +- Changelog for v1.20.1 (2021-04-13) + * Run container with isolation type set at 'from' + * bats helpers.bash - minor refactoring + * Bump containers/storage vendor to v1.29.0 + * build(deps): bump github.com/onsi/ginkgo from 1.16.0 to 1.16.1 + * Cirrus: Update VMs w/ F34beta + * CLI add/copy: add a --from option + * build(deps): bump github.com/onsi/ginkgo from 1.15.2 to 1.16.0 + * Add authentication system tests for 'commit' and 'bud' + * fix local image lookup for custom platform + * Double-check existence of OCI runtimes + * Cirrus: Make use of shared get_ci_vm container + * Add system tests of "buildah run" + * Update nix pin with `make nixpkgs` + * Remove some stuttering on returns errors + * Setup alias for --tty to --terminal + * Add conformance tests for COPY /... 
+ * Put a few more minutes on the clock for the CI conformance test + * Add a conformance test for COPY --from $symlink + * Add conformance tests for COPY "" + * Check for symlink in builtin volume + * Sort all mounts by destination directory + * System-test cleanup + * Export parse.Platform string to be used by podman-remote + * blobcache: fix sequencing error + * build(deps): bump github.com/containers/common from 0.35.3 to 0.35.4 + * Fix URL in demos/buildah_multi_stage.sh + * Add a few system tests + * [NO TESTS NEEDED] Use --recurse-modules when building git context + * Bump to v1.20.1-dev + +- Changelog for v1.20.0 (2021-03-25) + * vendor in containers/storage v1.28.1 + * build(deps): bump github.com/containers/common from 0.35.2 to 0.35.3 + * tests: prefetch: use buildah, not podman, for pulls + * Use faster way to check image tag existence during multi-arch build + * Add information about multi-arch images to the Readme + * COPY --chown: expand the conformance test + * pkg/chrootuser: use a bufio.Scanner + * [CI:DOCS] Fix rootful typo in docs + * build(deps): bump github.com/onsi/ginkgo from 1.15.1 to 1.15.2 + * Add documentation and testing for .containerignore + * build(deps): bump github.com/sirupsen/logrus from 1.8.0 to 1.8.1 + * build(deps): bump github.com/hashicorp/go-multierror from 1.1.0 to 1.1.1 + * Lookup Containerfile if user specifies a directory + * Add Tag format placeholder to docs + * copier: ignore sockets + * image: propagate errors from extractRootfs + * Remove system test of 'buildah containers -a' + * Clarify userns options are usable only as root in man pages + * Fix system test of 'containers -a' + * Remove duplicated code in addcopy + * build(deps): bump github.com/onsi/ginkgo from 1.15.0 to 1.15.1 + * build(deps): bump github.com/onsi/gomega from 1.10.5 to 1.11.0 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.1 to 1.7.2 + * Update multi-arch buildah build setup with new logic + * Update nix pin with `make nixpkgs` + * overlay.bats: fix the "overlay source permissions" test + * imagebuildah: use overlay for volumes when using overlay + * Make PolicyMap and PullPolicy names align + * copier: add GetOptions.IgnoreUnreadable + * Check local image to match system context + * fix: Containerfiles - smaller set of userns u/gids + * Set upperdir permissions based on source + * Shrink the vendoring size of pkg/cli + * Clarify image name match failure message + * ADD/COPY: create the destination directory first, chroot to it + * copier.GetOptions: add NoDerefSymLinks + * copier: add an Eval function + * Update system test for 'from --cap-add/drop' + * copier: fix a renaming bug + * copier: return child process stderr if we can't JSON decode the response + * Add some system tests + * build(deps): bump github.com/containers/storage from 1.26.0 to 1.27.0 + * complement add/copy --chmod documentation + * buildah login and logout, do not need to enter user namespace + * Add multi-arch image build + * chmod/chown added/fixed in bash completions + * OWNERS: add @lsm5 + * buildah add/copy --chmod dockerfile implementation + * bump github.com/openshift/imagebuilder from 1.1.8 to 1.2.0 + * buildah add/copy --chmod cli implementation for files and urls + * Make sure we set the buildah version label + * Isolation strings, should match user input + * [CI:DOCS] buildah-from.md: remove dup arch,os + * build(deps): bump github.com/containers/image/v5 from 5.10.2 to 5.10.3 + * Cirrus: Temp.
disable prior-fedora (F32) testing + * pr-should-include-tests: recognized "renamed" tests + * build(deps): bump github.com/sirupsen/logrus from 1.7.0 to 1.8.0 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.0 to 1.7.1 + * build(deps): bump github.com/containers/common from 0.34.2 to 0.35.0 + * Fix reaping of stages with no instructions + * add stale bot + * Add base image name to comment + * build(deps): bump github.com/spf13/cobra from 1.1.1 to 1.1.3 + * Don't fail copy to emptydir + * buildah: use volatile containers + * vendor: update containers/storage + * Eliminate the use of containers/building import in pkg subdirs + * Add more support for removing config + * Improve messages about --cache-from not being supported + * Revert patch to allow COPY/ADD of empty dirs. + * Don't fail copy to emptydir + * Fix tutorial for rootless mode + * Fix caching layers with build args + * Vendor in containers/image v5.10.2 + * build(deps): bump github.com/containers/common from 0.34.0 to 0.34.2 + * build(deps): bump github.com/onsi/ginkgo from 1.14.2 to 1.15.0 + * 'make validate': require PRs to include tests + * build(deps): bump github.com/onsi/gomega from 1.10.4 to 1.10.5 + * build(deps): bump github.com/containers/storage from 1.24.5 to 1.25.0 + * Use chown function for U volume flag from containers/common repository + * --iidfile: print hash prefix + * bump containernetworking/cni to v0.8.1 - fix for CVE-2021-20206 + * run: fix check for host pid namespace + * Finish plumbing for buildah bud --manifest + * buildah manifest add localimage should work + * Stop testing directory permissions with latest docker + * Fix build arg check + * build(deps): bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0 + * [ci:docs] Fix man page for buildah push + * Update nix pin with `make nixpkgs` + * Bump to containers/image v5.10.1 + * Rebuild layer if a change in ARG is detected + * Bump golang.org/x/crypto to the latest + * Add Ashley and Urvashi to Approvers + * local image lookup by digest + * Use build-arg ENV val from local environment if set + * Pick default OCI Runtime from containers.conf + * Added required devel packages + * Cirrus: Native OSX Build + * Cirrus: Two minor cleanup items + * Workaround for RHEL gating test failure + * build(deps): bump github.com/stretchr/testify from 1.6.1 to 1.7.0 + * build(deps): bump github.com/mattn/go-shellwords from 1.0.10 to 1.0.11 + * Reset upstream branch to dev version + * If destination does not exist, do not throw error + +- Changelog for v1.19.0 (2021-01-08) + * Update vendor of containers/storage and containers/common + * Buildah inspect should be able to inspect manifests + * Make buildah push support pushing manifest lists and digests + * Fix handling of TMPDIR environment variable + * Add support for --manifest flags + * Upper directory should match mode of destination directory + * Only grab the OS, Arch if the user actually specified them + * Use --arch and --os and --variant options to select architecture and os + * Cirrus: Track libseccomp and golang version + * copier.PutOptions: add an "IgnoreDevices" flag + * fix: `rmi --prune` when parent image is in store. + * build(deps): bump github.com/containers/storage from 1.24.3 to 1.24.4 + * build(deps): bump github.com/containers/common from 0.31.1 to 0.31.2 + * Allow users to specify stdin into containers + * Drop log message on failure to mount on /sys file systems to info + * Spelling + * SELinux no longer requires a tag.
+ * build(deps): bump github.com/opencontainers/selinux from 1.6.0 to 1.8.0 + * build(deps): bump github.com/containers/common from 0.31.0 to 0.31.1 + * Update nix pin with `make nixpkgs` + * Switch references of /var/run -> /run + * Allow FROM to be overridden with from option + * copier: don't assume we can chroot() on Unixy systems + * copier: add PutOptions.NoOverwriteDirNonDir, Get/PutOptions.Rename + * copier: handle replacing directories with not-directories + * copier: Put: skip entries with zero-length names + * build(deps): bump github.com/containers/storage from 1.24.2 to 1.24.3 + * Add U volume flag to chown source volumes + * Turn off PRIOR_UBUNTU Test until vm is updated + * pkg, cli: rootless uses correct isolation + * build(deps): bump github.com/onsi/gomega from 1.10.3 to 1.10.4 + * update installation doc to reflect current status + * Move away from using docker.io + * enable short-name aliasing + * build(deps): bump github.com/containers/storage from 1.24.1 to 1.24.2 + * build(deps): bump github.com/containers/common from 0.30.0 to 0.31.0 + * Throw errors when using bogus --network flags + * pkg/supplemented test: replace our null blobinfocache + * build(deps): bump github.com/containers/common from 0.29.0 to 0.30.0 + * inserts forgotten quotation mark + * Not prefer use local image create/add manifest + * Add container information to .containerenv + * Add --ignorefile flag to use alternate .dockerignore flags + * Add a source debug build + * Fix crash on invalid filter commands + * build(deps): bump github.com/containers/common from 0.27.0 to 0.29.0 + * Switch to using containers/common pkg's + * fix: non-portable shebang #2812 + * Remove copy/paste errors that leaked `Podman` into man pages. + * Add suggests cpp to spec file + * Apply suggestions from code review + * update docs for debian testing and unstable + * imagebuildah: disable pseudo-terminals for RUN + * Compute diffID for mapped-layer at creating image source + * intermediateImageExists: ignore images whose history we can't read + * Bump to v1.19.0-dev + * build(deps): bump github.com/containers/common from 0.26.3 to 0.27.0 + +- Changelog for v1.18.0 (2020-11-16) + * Fix testing error caused by simultaneous merge + * Vendor in containers/storage v1.24.0 + * short-names aliasing + * Add --policy flag to buildah pull + * Stop overwrapping and stuttering + * copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs + * Run: don't forcibly disable UTS namespaces in rootless mode + * test: ensure non-directory in a Dockerfile path is handled correctly + * Add a few tests for `pull` command + * Fix buildah config --cmd to handle array + * build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9 + * Fix NPE when Dockerfile path contains non-directory entries + * Update buildah bud man page from podman build man page + * Move declaration of decryption-keys to common cli + * Run: correctly call copier.Mkdir + * util: digging UID/GID out of os.FileInfo should work on Unix + * imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results + * Verify userns-uid-map and userns-gid-map input + * Use CPP, CC and flags in dep check scripts + * Avoid overriding LDFLAGS in Makefile + * ADD: handle --chown on URLs + * Update nix pin with `make nixpkgs` + * (*Builder).Run: MkdirAll: handle EEXIST error + * copier: try to force loading of nsswitch modules before chroot() + * fix MkdirAll usage + * build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3 + * build(deps): bump
github.com/containers/storage from 1.23.7 to 1.23.8 + * Use osusergo build tag for static build + * imagebuildah: cache should take image format into account + * Bump to v1.18.0-dev + +- Changelog for v1.17.0 (2020-10-29) + * Handle cases where other tools mount/unmount containers + * overlay.MountReadOnly: support RO overlay mounts + * overlay: use fusermount for rootless umounts + * overlay: fix umount + * Switch default log level of Buildah to Warn. Users need to see these messages + * Drop error messages about OCI/Docker format to Warning level + * build(deps): bump github.com/containers/common from 0.26.0 to 0.26.2 + * tests/testreport: adjust for API break in storage v1.23.6 + * build(deps): bump github.com/containers/storage from 1.23.5 to 1.23.7 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.6.5 to 1.6.6 + * copier: put: ignore Typeflag="g" + * Use curl to get repo file (fix #2714) + * build(deps): bump github.com/containers/common from 0.25.0 to 0.26.0 + * build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1 + * Remove docs that refer to bors, since we're not using it + * Buildah bud should not use stdin by default + * bump containerd, docker, and golang.org/x/sys + * Makefile: cross: remove windows.386 target + * copier.copierHandlerPut: don't check length when there are errors + * Stop excessive wrapping + * CI: require that conformance tests pass + * bump(github.com/openshift/imagebuilder) to v1.1.8 + * Skip tlsVerify insecure BUILD_REGISTRY_SOURCES + * Fix build path wrong https://github.com/containers/podman/issues/7993 + * refactor pullpolicy to avoid deps + * build(deps): bump github.com/containers/common from 0.24.0 to 0.25.0 + * CI: run gating tasks with a lot more memory + * ADD and COPY: descend into excluded directories, sometimes + * copier: add more context to a couple of error messages + * copier: check an error earlier + * copier: log stderr output as debug on success + * Update nix pin with `make nixpkgs` + * Set directory ownership when copied with ID mapping + * build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0 + * build(deps): bump github.com/containers/common from 0.23.0 to 0.24.0 + * Cirrus: Remove bors artifacts + * Sort build flag definitions alphabetically + * ADD: only expand archives at the right time + * Remove configuration for bors + * Shell Completion for podman build flags + * Bump c/common to v0.24.0 + * New CI check: xref --help vs man pages + * CI: re-enable several linters + * Move --userns-uid-map/--userns-gid-map description into buildah man page + * add: preserve ownerships and permissions on ADDed archives + * Makefile: tweak the cross-compile target + * Bump containers/common to v0.23.0 + * chroot: create bind mount targets 0755 instead of 0700 + * Change call to Split() to safer SplitN() + * chroot: fix handling of errno seccomp rules + * build(deps): bump github.com/containers/image/v5 from 5.5.2 to 5.6.0 + * Add In Progress section to contributing + * integration tests: make sure tests run in ${topdir}/tests + * Run(): ignore containers.conf's environment configuration + * Warn when setting healthcheck in OCI format + * Cirrus: Skip git-validate on branches + * tools: update git-validation to the latest commit + * tools: update golangci-lint to v1.18.0 + * Add a few tests of push command + * Add(): fix handling of relative paths with no ContextDir + * build(deps): bump github.com/containers/common from 0.21.0 to 0.22.0 + * Lint: Use same linters as podman + * 
Validate: reference HEAD + * Fix buildah mount to display container names not ids + * Update nix pin with `make nixpkgs` + * Add missing --format option in buildah from man page + * Fix up code based on codespell + * build(deps): bump github.com/openshift/imagebuilder from 1.1.6 to 1.1.7 + * build(deps): bump github.com/containers/storage from 1.23.4 to 1.23.5 + * Improve buildah completions + * Cirrus: Fix validate commit epoch + * Fix bash completion of manifest flags + * Uniform some man pages + * Update Buildah Tutorial to address BZ1867426 + * Update bash completion of `manifest add` sub command + * copier.Get(): hard link targets shouldn't be relative paths + * build(deps): bump github.com/onsi/gomega from 1.10.1 to 1.10.2 + * Pass timestamp down to history lines + * Timestamp gets updated every time you inspect an image + * bud.bats: use absolute paths in newly-added tests + * contrib/cirrus/lib.sh: don't use CN for the hostname + * tests: Add some tests + * Update `manifest add` man page + * Extend flags of `manifest add` + * build(deps): bump github.com/containers/storage from 1.23.3 to 1.23.4 + * build(deps): bump github.com/onsi/ginkgo from 1.14.0 to 1.14.1 + * Bump to v1.17.0-dev + * CI: expand cross-compile checks + +- Changelog for v1.16.0 (2020-09-03) + * fix build on 32bit arches + * containerImageRef.NewImageSource(): don't always force timestamps + * Add fuse module warning to image readme + * Heed our retry delay option values when retrying commit/pull/push + * Switch to containers/common for seccomp + * Use --timestamp rather than --omit-timestamp + * docs: remove outdated notice + * docs: remove outdated notice + * build-using-dockerfile: add a hidden --log-rusage flag + * build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2 + * Discard ReportWriter if user sets options.Quiet + * build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3 + * Fix ownership of content copied using COPY --from + * newTarDigester: zero out timestamps in tar headers + * Update nix pin with `make nixpkgs` + * bud.bats: correct .dockerignore integration tests + * Use pipes for copying + * run: include stdout in error message + * run: use the correct error for errors.Wrapf + * copier: un-export internal types + * copier: add Mkdir() + * in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE + * docs/buildah-commit.md: tweak some wording, add a --rm example + * imagebuildah: don't blank out destination names when COPYing + * Replace retry functions with common/pkg/retry + * StageExecutor.historyMatches: compare timestamps using .Equal + * Update vendor of containers/common + * Fix errors found in coverity scan + * Change namespace handling flags to better match podman commands + * conformance testing: ignore buildah.BuilderIdentityAnnotation labels + * Vendor in containers/storage v1.23.0 + * Add buildah.IsContainer interface + * Avoid feeding run_buildah to pipe + * fix(buildahimage): add xz dependency in buildah image + * Bump github.com/containers/common from 0.15.2 to 0.18.0 + * Howto for rootless image building from OpenShift + * Add --omit-timestamp flag to buildah bud + * Update nix pin with `make nixpkgs` + * Shutdown storage on failures + * Handle COPY --from when an argument is used + * Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0 + * Cirrus: Use newly built VM images + * Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92 + * Enhance the .dockerignore man pages + * conformance: add a test for COPY
from subdirectory + * fix bug manifest inspect + * Add documentation for .dockerignore + * Add BuilderIdentityAnnotation to identify buildah version + * DOC: Add quay.io/containers/buildah image to README.md + * Update buildahimages readme + * fix spelling mistake in "info" command result display + * Don't bind /etc/hosts and /etc/resolv.conf if network is not present + * blobcache: avoid an unnecessary NewImage() + * Build static binary with `buildGoModule` + * copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit + * tarFilterer: handle multiple archives + * Fix a race we hit during conformance tests + * Rework conformance testing + * Update 02-registries-repositories.md + * test-unit: invoke cmd/buildah tests with --flags + * parse: fix a type mismatch in a test + * Fix compilation of tests/testreport/testreport + * build.sh: log the version of Go that we're using + * test-unit: increase the test timeout to 40/45 minutes + * Add the "copier" package + * Fix & add notes regarding problematic language in codebase + * Add dependency on github.com/stretchr/testify/require + * CompositeDigester: add the ability to filter tar streams + * BATS tests: make more robust + * vendor golang.org/x/text@v0.3.3 + * Switch golang 1.12 to golang 1.13 + * imagebuildah: wait for stages that might not have even started yet + * chroot, run: not fail on bind mounts from /sys + * chroot: do not use setgroups if it is blocked + * Set engine env from containers.conf + * imagebuildah: return the right stage's image as the "final" image + * Fix a help string + * Deduplicate environment variables + * switch containers/libpod to containers/podman + * Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3 + * Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0 + * Mask out /sys/dev to prevent information leak + * linux: skip errors from the runtime kill + * Mask over the /sys/fs/selinux in mask branch + * Add VFS additional image store to container + * tests: add auth tests + * Allow "readonly" as alias to "ro" in mount options + * Ignore OS X specific consistency mount option + * Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0 + * Bump github.com/containers/common from 0.14.0 to 0.15.2 + * Rootless Buildah should default to IsolationOCIRootless + * imagebuildah: fix inheriting multi-stage builds + * Make imagebuildah.BuildOptions.Architecture/OS optional + * Make imagebuildah.BuildOptions.Jobs optional + * Resolve a possible race in imagebuildah.Executor.startStage() + * Switch scripts to use containers.conf + * Bump openshift/imagebuilder to v1.1.6 + * Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5 + * buildah, bud: support --jobs=N for parallel execution + * executor: refactor build code inside new function + * Add bud regression tests + * Cirrus: Fix missing htpasswd in registry img + * docs: clarify the 'triples' format + * CHANGELOG.md: Fix markdown formatting + * Add nix derivation for static builds + * Bump to v1.16.0-dev + * add version centos7 for compatible + +- Changelog for v1.15.0 (2020-06-17) + * Bump github.com/containers/common from 0.12.0 to 0.13.1 + * Bump github.com/containers/storage from 1.20.1 to 1.20.2 + * Bump github.com/seccomp/containers-golang from 0.4.1 to 0.5.0 + * Bump github.com/stretchr/testify from 1.6.0 to 1.6.1 + * Bump github.com/opencontainers/runc from 1.0.0-rc9 to 1.0.0-rc90 + * Add CVE-2020-10696 to CHANGELOG.md and changelog.txt + * Bump github.com/stretchr/testify from 1.5.1 to 1.6.0 + * Bump
github.com/onsi/ginkgo from 1.12.2 to 1.12.3 + * Vendor in containers/common v0.12.0 + * fix lighttpd example + * Vendor in new go.etcd.io/bbolt + * Bump github.com/onsi/ginkgo from 1.12.1 to 1.12.2 + * Bump imagebuilder for ARG fix + * Bump github.com/containers/common from 0.11.2 to 0.11.4 + * remove dependency on openshift struct + * Warn on unset build arguments + * vendor: update seccomp/containers-golang to v0.4.1 + * Amended docs + * Updated docs + * clean up comments + * update exit code for tests + * Implement commit for encryption + * implementation of encrypt/decrypt push/pull/bud/from + * fix resolve docker image name as transport + * Bump github.com/opencontainers/go-digest from 1.0.0-rc1 to 1.0.0 + * Bump github.com/onsi/ginkgo from 1.12.0 to 1.12.1 + * Bump github.com/containers/storage from 1.19.1 to 1.19.2 + * Bump github.com/containers/image/v5 from 5.4.3 to 5.4.4 + * Add preliminary profiling support to the CLI + * Bump github.com/containers/common from 0.10.0 to 0.11.2 + * Evaluate symlinks in build context directory + * fix error info about get signatures for containerImageSource + * Add Security Policy + * Cirrus: Fixes from review feedback + * Bump github.com/containers/storage from 1.19.0 to 1.19.1 + * Bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0 + * imagebuildah: stages shouldn't count as their base images + * Update containers/common v0.10.0 + * Bump github.com/fsouza/go-dockerclient from 1.6.4 to 1.6.5 + * Add registry to buildahimage Dockerfiles + * Cirrus: Use pre-installed VM packages + F32 + * Cirrus: Re-enable all distro versions + * Cirrus: Update to F31 + Use cache images + * golangci-lint: Disable gosimple + * Lower number of golangci-lint threads + * Fix permissions on containers.conf + * Don't force tests to use runc + * Bump github.com/containers/common from 0.9.1 to 0.9.5 + * Return exit code from failed containers + * Bump github.com/containers/storage from 1.18.2 to 1.19.0 + * Bump github.com/containers/common from 0.9.0 to 0.9.1 + * cgroup_manager should be under [engine] + * Use c/common/pkg/auth in login/logout + * Cirrus: Temporarily disable Ubuntu 19 testing + * Add containers.conf to stablebyhand build + * Update gitignore to exclude test Dockerfiles + * Bump github.com/fsouza/go-dockerclient from 1.6.3 to 1.6.4 + * Bump github.com/containers/common from 0.8.1 to 0.9.0 + * Bump back to v1.15.0-dev + * Remove warning for systemd inside of container + +- Changelog for v1.14.8 (2020-04-09) + * Run (make vendor) + * Run (make -C tests/tools vendor) + * Run (go mod tidy) before (go mod vendor) again + * Fix (make vendor) + * Bump validation + * Bump back to v1.15.0-dev + +- Changelog for v1.14.7 (2020-04-07) + * Bump github.com/containers/image/v5 from 5.3.1 to 5.4.3 + * make vendor: run `tidy` after `vendor` + * Do not skip the directory when the ignore pattern matches + * Bump github.com/containers/common from 0.7.0 to 0.8.1 + * Downgrade sirupsen/logrus from 1.4.2 + * Fix errorf conventions + * dockerignore tests : remove symlinks, rework + * Bump back to v1.15.0-dev + +- Changelog for v1.14.6 (2020-04-02) + * bud.bats - cleanup, refactoring + * vendor in latest containers/storage 1.18.0 and containers/common v0.7.0 + * Bump github.com/spf13/cobra from 0.0.6 to 0.0.7 + * Bump github.com/containers/storage from 1.16.5 to 1.17.0 + * Bump github.com/containers/image/v5 from 5.2.1 to 5.3.1 + * Fix Amazon install step + * Bump back to v1.15.0-dev + * Fix bud-build-arg-cache test +
* Make image history work correctly with new args handling + * Don't add args to the RUN environment from the Builder + * Update github.com/openshift/imagebuilder to v1.1.4 + * Add .swp files to .gitignore + +- Changelog for v1.14.5 (2020-03-26) + * revert #2246 FIPS mode change + * Bump back to v1.15.0-dev + * image with dup layers: we now have one on quay + * digest test : make more robust + +- Changelog for v1.14.4 (2020-03-25) + * Fix fips-mode check for RHEL8 boxes + * Fix potential CVE in tarfile w/ symlink (Edit 02-Jun-2020: Addresses CVE-2020-10696) + * Fix .dockerignore with globs and ! commands + * update install steps for Amazon Linux 2 + * Bump github.com/openshift/imagebuilder from 1.1.2 to 1.1.3 + * Add comment for RUN command in volume ownership test + * Run stat command directly for volume ownership test + * vendor in containers/common v0.6.1 + * Cleanup go.sum + * Bump back to v1.15.0-dev + +- Changelog for v1.14.3 (2020-03-17) + * Update containers/storage to v1.16.5 + * Bump github.com/containers/storage from 1.16.2 to 1.16.4 + * Bump github.com/openshift/imagebuilder from 1.1.1 to 1.1.2 + * Update github.com/openshift/imagebuilder vendoring + * Update unshare man page to fix script example + * Fix compilation errors on non linux platforms + * Bump containers/common and opencontainers/selinux versions + * Add tests for volume ownership + * Preserve volume uid and gid through subsequent commands + * Fix FORWARD_NULL errors found by Coverity + * Bump github.com/containers/storage from 1.16.1 to 1.16.2 + * Fix errors found by codespell + * Bump back to v1.15.0-dev + * Add Pull Request Template + +- Changelog for v1.14.2 (2020-03-03) + * Add Buildah pull request template + * Bump to containers/storage v1.16.1 + * run_linux: fix tight loop if file is not pollable + * Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3 + * Bump github.com/containers/common from 0.4.1 to 0.4.2 + * Bump back to v1.15.0-dev + * Add Containerfile to build a versioned stable image on quay.io + +- Changelog for v1.14.1 (2020-02-27) + * Search for local runtime per values in containers.conf + * Set correct ownership on working directory + * BATS : in teardown, umount stale mounts + * Bump github.com/spf13/cobra from 0.0.5 to 0.0.6 + * Bump github.com/fsouza/go-dockerclient from 1.6.1 to 1.6.3 + * Bump github.com/stretchr/testify from 1.4.0 to 1.5.1 + * Replace unix with syscall to allow vendoring into libpod + * Update to containers/common v0.4.1 + * Improve remote manifest retrieval + * Fix minor spelling errors in containertools README + * Clear the right variable in buildahimage + * Correct a couple of incorrect format specifiers + * Update to containers/common v0.3.0 + * manifest push --format: force an image type, not a list type + * run: adjust the order in which elements are added to $PATH + * getDateAndDigestAndSize(): handle creation time not being set + * Bump github.com/containers/common from 0.2.0 to 0.2.1 + * include installation steps for CentOS 8 and Stream + * include installation steps for CentOS7 and forks + * Adjust Ubuntu install info to also work on Pop!_OS + * Make the commit id clear like Docker + * Show error on copied file above context directory in build + * Bump github.com/containers/image/v5 from 5.2.0 to 5.2.1 + * pull/from/commit/push: retry on most failures + * Makefile: fix install.cni.sudo + * Repair buildah so it can use containers.conf on the server side + * Bump github.com/mattn/go-shellwords from 1.0.9 to 1.0.10 + * 
Bump github.com/fsouza/go-dockerclient from 1.6.0 to 1.6.1 + * Fixing formatting & build instructions + * Add Code of Conduct + * Bors: Fix no. req. github reviews + * Cirrus+Bors: Simplify temp branch skipping + * Bors-ng: Add documentation and status-icon + * Bump github.com/onsi/ginkgo from 1.11.0 to 1.12.0 + * fix XDG_RUNTIME_DIR for authfile + * Cirrus: Disable F29 testing + * Cirrus: Add jq package + * Cirrus: Fix lint + validation using wrong epoch + * Stop using fedoraproject registry + * Bors: Workaround ineffective required statuses + * Bors: Enable app + Disable Travis + * Cirrus: Add standardized log-collection + * Cirrus: Improve automated lint + validation + * Allow passing options to golangci-lint + * Cirrus: Fixes from review feedback + * Cirrus: Temporarily ignore VM testing failures + * Cirrus: Migrate off papr + implement VM testing + * Cirrus: Update packages + fixes for get_ci_vm.sh + * Show validation command-line + * Skip overlay test w/ vfs driver + * use alpine, not centos, for various tests + * Flake handling: cache and prefetch images + * Bump to v1.15.0-dev + +- Changelog for v1.14.0 (2020-02-05) + * bump github.com/mtrmac/gpgme + * Update containers/common to v0.1.4 + * manifest push: add --format option + * Bump github.com/onsi/gomega from 1.8.1 to 1.9.0 + * vendor github.com/containers/image/v5@v5.2.0 + * info test: deal with random key order + * Bump back to v1.14.0-dev + +- Changelog for v1.13.2 (2020-01-29) + * sign.bats: set GPG_TTY=/dev/null + * Fix parse_unsupported.go + * getDateAndDigestAndSize(): use manifest.Digest + * Bump github.com/opencontainers/selinux from 1.3.0 to 1.3.1 + * Bump github.com/containers/common from 0.1.0 to 0.1.2 + * Touch up os/arch doc + * chroot: handle slightly broken seccomp defaults + * buildahimage: specify fuse-overlayfs mount options + * Bump github.com/mattn/go-shellwords from 1.0.7 to 1.0.9 + * copy.bats: make sure we detect failures due to missing source + * parse: don't complain about not being able to rename something to itself + * Makefile: use a $(GO_TEST) macro, fix a typo + * manifests: unit test fix + * Fix build for 32bit platforms + * Allow users to set OS and architecture on bud + * Fix COPY in containerfile with envvar + * Bump c/storage to v1.15.7 + * add --sign-by to bud/commit/push, --remove-signatures for pull/push + * Remove cut/paste error in CHANGELOG.md + * Update vendor of containers/common to v0.1.0 + * update install instructions for Debian, Raspbian and Ubuntu + * Add support for containers.conf + * Bump back to v1.14.0-dev + +- Changelog for v1.13.1 (2020-01-14) + * Bump github.com/containers/common from 0.0.5 to 0.0.7 + * Bump github.com/onsi/ginkgo from 1.10.3 to 1.11.0 + * Bump github.com/pkg/errors from 0.8.1 to 0.9.0 + * Bump github.com/onsi/gomega from 1.7.1 to 1.8.1 + * Add codespell support + * copyFileWithTar: close source files at the right time + * copy: don't digest files that we ignore + * Check for .dockerignore specifically + * Travis: rm go 1.12.x + * Don't setup excludes, if there is only one pattern to match + * set HOME env to /root on chroot-isolation by default + * docs: fix references to containers-*.5 + * update openshift/api + * fix bug Add check .dockerignore COPY file + * buildah bud --volume: run from tmpdir, not source dir + * Fix imageNamePrefix to give consistent names in buildah-from + * cpp: use -traditional and -undef flags + * Fix image reference in tutorial 4 + * discard outputs coming from onbuild command on buildah-from
--quiet + * make --format columnizing consistent with buildah images + * Bump to v1.14.0-dev + +- Changelog for v1.13.0 (2019-12-27) + * Bump to c/storage v1.15.5 + * Update container/storage to v1.15.4 + * Fix option handling for volumes in build + * Rework overlay pkg for use with libpod + * Fix buildahimage builds for buildah + * Add support for FIPS-Mode backends + * Set the TMPDIR for pulling/pushing image to $TMPDIR + * WIP: safer test for pull --all-tags + * BATS major cleanup: blobcache.bats: refactor + * BATS major cleanup: part 4: manual stuff + * BATS major cleanup, step 3: yet more run_buildah + * BATS major cleanup, part 2: use more run_buildah + * BATS major cleanup, part 1: log-level + * Bump github.com/containers/image/v5 from 5.0.0 to 5.1.0 + * Bump github.com/containers/common from 0.0.3 to 0.0.5 + * Bump to v1.13.0-dev + +- Changelog for v1.12.0 (2019-12-13) + * Allow ADD to use http src + * Bump to c/storage v.1.15.3 + * install.md: update golang dependency + * imgtype: reset storage opts if driver overridden + * Start using containers/common + * overlay.bats typo: fuse-overlays should be fuse-overlayfs + * chroot: Unmount with MNT_DETACH instead of UnmountMountpoints() + * bind: don't complain about missing mountpoints + * imgtype: check earlier for expected manifest type + * Vendor containers/storage fix + * Vendor containers/storage v1.15.1 + * Add history names support + * PR takeover of #1966 + * Tests: Add inspect test check steps + * Tests: Add container name and id check in containers test steps + * Test: Get permission in add test + * Tests: Add a test for tag by id + * Tests: Add test cases for push test + * Tests: Add image digest test + * Tests: Add some buildah from tests + * Tests: Add two commit test + * Tests: Add buildah bud with --quiet test + * Tests: Add two test for buildah add + * Bump back to v1.12.0-dev + +- Changelog for v1.11.6 (2019-12-03) + * Handle missing equal sign in --from and --chown flags for COPY/ADD + * bud COPY does not download URL + * Bump github.com/onsi/gomega from 1.7.0 to 1.7.1 + * Fix .dockerignore exclude regression + * Ran buildah through codespell + * commit(docker): always set ContainerID and ContainerConfig + * Touch up commit man page image parameter + * Add builder identity annotations. 
+ * info: use util.Runtime() + * Bump github.com/onsi/ginkgo from 1.10.2 to 1.10.3 + * Bump back to v1.12.0-dev + +- Changelog for v1.11.5 (2019-11-11) + * Enhance error on unsafe symbolic link targets + * Add OCIRuntime to info + * Check nonexistent authfile + * Only output image id if running buildah bud --quiet + * Fix --pull=true||false and add --pull-never to bud and from (retry) + * cgroups v2: tweak or skip tests + * Prepwork: new 'skip' helpers for tests + * Handle configuration blobs for manifest lists + * unmarshalConvertedConfig: avoid using the updated image's ref + * Add completions for Manifest commands + * Add disableFips option to secrets pkg + * Update bud.bats test archive test + * Add test for caching based on content digest + * Builder.untarPath(): always evaluate b.ContentDigester.Hash() + * Bump github.com/onsi/ginkgo from 1.10.1 to 1.10.2 + * Fix another broken test: copy-url-mtime + * yet more fixes + * Actual bug fix for 'add' test: fix the expected mode + * BATS tests - lots of mostly minor cleanup + * build: drop support for ostree + * Add support for make vendor-in-container + * imgtype: exit with error if storage fails + * remove XDG_RUNTIME_DIR from default authfile path + * fix troubleshooting redirect instructions + * Bump back to v1.12.0-dev + +- Changelog for v1.11.4 (2019-10-28) + * buildah: add a "manifest" command + * manifests: add the module + * pkg/supplemented: add a package for grouping images together + * pkg/manifests: add a manifest list build/manipulation API + * Update for ErrUnauthorizedForCredentials API change in containers/image + * Update for manifest-lists API changes in containers/image + * version: also note the version of containers/image + * Move to containers/image v5.0.0 + * Enable --device directory as src device + * Fix git build with branch specified + * Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1 + * Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0 + * Add clarification to the Tutorial for new users + * Silence "using cache" to ensure -q is fully quiet + * Add OWNERS File to Buildah + * Bump github.com/containers/storage from 1.13.4 to 1.13.5 + * Move runtime flag to bud from common + * Commit: check for storage.ErrImageUnknown using errors.Cause() + * Fix crash when invalid COPY --from flag is specified.
+ * Bump back to v1.12.0-dev + +- Changelog for v1.11.3 (2019-10-04) + * Update c/image to v4.0.1 + * Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 + * Fix --build-args handling + * Bump github.com/spf13/cobra from 0.0.3 to 0.0.5 + * Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2 + * Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1 + * Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4 + * Add support for retrieving context from stdin "-" + * Ensure bud remote context cleans up on error + * info: add cgroups2 + * Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1 + * Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6 + * Bump github.com/stretchr/testify from 1.3.0 to 1.4.0 + * Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0 + * Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3 + * Bump github.com/onsi/gomega from 1.5.0 to 1.7.0 + * update c/storage to v1.13.4 + * Print build 'STEP' line to stdout, not stderr + * Fix travis-ci on forks + * Vendor c/storage v1.13.3 + * Use Containerfile by default + * Added tutorial on how to include Buildah as library + * util/util: Fix "configuraitno" -> "configuration" log typo + * Bump back to v1.12.0-dev + +- Changelog for v1.11.2 (2019-09-13) + * Add some cleanup code + * Move devices code to unit specific directory. + * Bump back to v1.12.0-dev + +- Changelog for v1.11.1 (2019-09-11) + * Add --devices flag to bud and from + * Downgrade .papr to highest atomic version + * Add support for /run/.containerenv + * Truncate output of too long image names + * Preserve file and directory mount permissions + * Bump fedora version from 28 to 30 + * makeImageRef: ignore EmptyLayer if Squash is set + * Set TMPDIR to /var/tmp by default + * replace --debug=false with --log-level=error + * Allow mounts.conf entries for equal source and destination paths + * fix label and annotation for 1-line Dockerfiles + * Enable interfacer linter and fix lints + * install.md: mention goproxy + * Makefile: use go proxy + * Bump to v1.12.0-dev + +- Changelog for v1.11.0 (2019-08-29) + * tests/bud.bats: add --signature-policy to some tests + * Vendor github.com/openshift/api + * pull/commit/push: pay attention to $BUILD_REGISTRY_SOURCES + * Add `--log-level` command line option and deprecate `--debug` + * add support for cgroupsV2 + * Correctly detect ExitError values from Run() + * Disable empty logrus timestamps to reduce logger noise + * Remove outdated deps Makefile target + * Remove gofmt.sh in favor of golangci-lint + * Remove govet.sh in favor of golangci-lint + * Allow to override build date with SOURCE_DATE_EPOCH + * Update shebangs to take env into consideration + * Fix directory pull image names + * Add --digestfile and Re-add push statement as debug + * README: mention that Podman uses Buildah's API + * Use content digests in ADD/COPY history entries + * add: add a DryRun flag to AddAndCopyOptions + * Fix possible runtime panic on bud + * Add security-related volume options to validator + * use correct path for ginkgo + * Add bud 'without arguments' integration tests + * Update documentation about bud + * add: handle hard links when copying with .dockerignore + * add: teach copyFileWithTar() about symlinks and directories + * Allow buildah bud to be called without arguments + * imagebuilder: fix detection of referenced stage roots + * Touch up go mod instructions in install + * run_linux: fix mounting /sys in a userns + * Vendor Storage v1.13.2 + * Cirrus: Update VM images + * Fix
handling of /dev/null masked devices + * Update `bud`/`from` help to contain indicator for `--dns=none` + * Bump back to v1.11.0-dev + +- Changelog for v1.10.1 (2019-08-08) + * Bump containers/image to v3.0.2 to fix keyring issue + * Bug fix for volume minus syntax + * Bump container/storage v1.13.1 and containers/image v3.0.1 + * bump github.com/containernetworking/cni to v0.7.1 + * Add overlayfs to fuse-overlayfs tip + * Add automatic apparmor tag discovery + * Fix bug whereby --get-login has no effect + * Bump to v1.11.0-dev + +- Changelog for v1.10.0 (2019-08-02) + * vendor github.com/containers/image@v3.0.0 + * Remove GO111MODULE in favor of `-mod=vendor` + * Vendor in containers/storage v1.12.16 + * Add '-' minus syntax for removal of config values + * tests: enable overlay tests for rootless + * rootless, overlay: use fuse-overlayfs + * vendor github.com/containers/image@v2.0.1 + * Added '-' syntax to remove volume config option + * delete `successfully pushed` message + * Add golint linter and apply fixes + * vendor github.com/containers/storage@v1.12.15 + * Change wait to sleep in buildahimage readme + * Handle ReadOnly images when deleting images + * Add support for listing read/only images + +- Changelog for v1.9.2 (2019-07-19) + * from/import: record the base image's digest, if it has one + * Fix CNI version retrieval to not require network connection + * Add misspell linter and apply fixes + * Add goimports linter and apply fixes + * Add stylecheck linter and apply fixes + * Add unconvert linter and apply fixes + * image: make sure we don't try to use zstd compression + * run.bats: skip the "z" flag when testing --mount + * Update to runc v1.0.0-rc8 + * Update to match updated runtime-tools API + * bump github.com/opencontainers/runtime-tools to v0.9.0 + * Build e2e tests using the proper build tags + * Add unparam linter and apply fixes + * Run: correct a typo in the --cap-add help text + * unshare: add a --mount flag + * fix push check image name is not empty + * Bump to v1.9.2-dev + +- Changelog for v1.9.1 (2019-07-12) + * add: fix slow copy with no excludes + * Add errcheck linter and fix missing error check + * Improve tests/tools/Makefile parallelism and abstraction + * Fix response body not closed resource leak + * Switch to golangci-lint + * Add gomod instructions and mailing list links + * On Masked path, check if /dev/null already mounted before mounting + * Update to containers/storage v1.12.13 + * Refactor code in package imagebuildah + * Add rootless podman with NFS issue in documentation + * Add --mount for buildah run + * import method ValidateVolumeOpts from libpod + * Fix typo + * Makefile: set GO111MODULE=off + * rootless: add the built-in slirp DNS server + * Update docker/libnetwork to get rid of outdated sctp package + * Update buildah-login.md + * migrate to go modules + * install.md: mention go modules + * tests/tools: go module for test binaries + * fix --volume splits comma delimited option + * Add bud test for RUN with a priv'd command + * vendor logrus v1.4.2 + * pkg/cli: panic when flags can't be hidden + * pkg/unshare: check all errors + * pull: check error during report write + * run_linux.go: ignore unchecked errors + * conformance test: catch copy error + * chroot/run_test.go: export funcs to actually be executed + * tests/imgtype: ignore error when shutting down the store + * testreport: check json error + * bind/util.go: remove unused func + * rm chroot/util.go + * imagebuildah: remove unused `dedupeStringSlice` + * 
StageExecutor: EnsureContainerPath: catch error from SecureJoin() + * imagebuildah/build.go: return instead of branching + * rmi: avoid redundant branching + * conformance tests: nilness: allocate map + * imagebuildah/build.go: avoid redundant `filepath.Join()` + * imagebuildah/build.go: avoid redundant `os.Stat()` + * imagebuildah: omit comparison to bool + * fix "ineffectual assignment" lint errors + * docker: ignore "repeats json tag" lint error + * pkg/unshare: use `...` instead of iterating a slice + * conformance: bud test: use raw strings for regexes + * conformance suite: remove unused func/var + * buildah test suite: remove unused vars/funcs + * testreport: fix golangci-lint errors + * util: remove redundant `return` statement + * chroot: only log clean-up errors + * images_test: ignore golangci-lint error + * blobcache: log error when draining the pipe + * imagebuildah: check errors in deferred calls + * chroot: fix error handling in deferred funcs + * cmd: check all errors + * chroot/run_test.go: check errors + * chroot/run.go: check errors in deferred calls + * imagebuildah.Executor: remove unused onbuild field + * docker/types.go: remove unused struct fields + * util: use strings.ContainsRune instead of index check + * Cirrus: Initial implementation + * Bump to v1.9.1-dev + +- Changelog for v1.9.0 (2019-06-15) + * buildah-run: fix-out-of-range panic (2) + * Bump back to v1.9.0-dev + +- Changelog for v1.8.4 (2019-06-13) + * Update containers/image to v2.0.0 + * run: fix hang with run and --isolation=chroot + * run: fix hang when using run + * chroot: drop unused function call + * remove --> before imageID on build + * Always close stdin pipe + * Write deny to setgroups when doing single user mapping + * Avoid including linux/memfd.h + * Add a test for the symlink pointing to a directory + * Add missing continue + * Fix the handling of symlinks to absolute paths + * Only set default network sysctls if not rootless + * Support --dns=none like podman + * fix bug --cpu-shares parsing typo + * Fix validate complaint + * Update vendor on containers/storage to v1.12.10 + * Create directory paths for COPY thereby ensuring correct perms + * imagebuildah: use a stable sort for comparing build args + * imagebuildah: tighten up cache checking + * bud.bats: add a test verifying the order of --build-args + * add -t to podman run + * imagebuildah: simplify screening by top layers + * imagebuildah: handle ID mappings for COPY --from + * imagebuildah: apply additionalTags ourselves + * bud.bats: test additional tags with cached images + * bud.bats: add a test for WORKDIR and COPY with absolute destinations + * Cleanup Overlay Mounts content + +- Changelog for v1.8.3 (2019-06-04) + * Add support for file secret mounts + * Add ability to skip secrets in mounts file + * allow 32bit builds + * fix tutorial instructions + * imagebuilder: pass the right contextDir to Add() + * add: use fileutils.PatternMatcher for .dockerignore + * bud.bats: add another .dockerignore test + * unshare: fallback to single usermapping + * addHelperSymlink: clear the destination on os.IsExist errors + * bud.bats: test replacing symbolic links + * imagebuildah: fix handling of destinations that end with '/' + * bud.bats: test COPY with a final "/" in the destination + * linux: add check for sysctl before using it + * unshare: set _CONTAINERS_ROOTLESS_GID + * Rework buildahimages + * build context: support https git repos + * Add a test for ENV special chars behaviour + * Check in new Dockerfiles + * Apply custom SHELL during build time + * config: expand variables only at
the command line + * SetEnv: we only need to expand v once + * Add default /root if empty on chroot iso + * Add support for Overlay volumes into the container. + * Export buildah validate volume functions so it can share code with libpod + * Bump baseline test to F30 + * Fix rootless handling of /dev/shm size + * Avoid fmt.Printf() in the library + * imagebuildah: tighten cache checking back up + * Handle WORKDIR with dangling target + * Default Authfile to proper path + * Make buildah run --isolation follow BUILDAH_ISOLATION environment + * Vendor in latest containers/storage and containers/image + * getParent/getChildren: handle layerless images + * imagebuildah: recognize cache images for layerless images + * bud.bats: test scratch images with --layers caching + * Get CHANGELOG.md updates + * Add some symlinks to test our .dockerignore logic + * imagebuildah: addHelper: handle symbolic links + * commit/push: use an everything-allowed policy + * Correct manpage formatting in files section + * Remove must be root statement from buildah doc + * Change image names to stable, testing and upstream + * Bump back to v1.9.0-dev + +- Changelog for v1.8.2 (2019-05-02) + * Vendor Storage 1.12.6 + * Create scratch file in TESTDIR + * Test bud-copy-dot with --layers picks up changed file + * Bump back to 1.9.0-dev + +- Changelog for v1.8.1 (2019-05-01) + * Don't create directory on container + * Replace kubernetes/pause in tests with k8s.gcr.io/pause + * imagebuildah: don't remove intermediate images if we need them + * Rework buildahimagegit to buildahimageupstream + * Fix Transient Mounts + * Handle WORKDIRs that are symlinks + * allow podman to build a client for windows + * Touch up 1.9-dev to 1.9.0-dev + * Bump to 1.9-dev + +- Changelog for v1.8.0 (2019-04-26) + * Resolve symlink when checking container path + * commit: commit on every instruction, but not always with layers + * CommitOptions: drop the unused OnBuild field + * makeImageRef: pass in the whole CommitOptions structure + * cmd: API cleanup: stores before images + * run: check if SELinux is enabled + * Fix buildahimages Dockerfiles to include support for additionalimages mounted from host. 
+ * Detect changes in rootdir + * Fix typo in buildah-pull(1) + * Vendor in latest containers/storage + * Keep track of any build-args used during buildah bud --layers + * commit: always set a parent ID + * imagebuildah: rework unused-argument detection + * fix bug dest path when COPY .dockerignore + * Move Host IDMAppings code from util to unshare + * Add BUILDAH_ISOLATION rootless back + * Travis CI: fail fast, upon error in any step + * imagebuildah: only commit images for intermediate stages if we have to + * Use errors.Cause() when checking for IsNotExist errors + * auto pass http_proxy to container + * Bump back to 1.8-dev + +- Changelog for v1.7.3 (2019-04-16) + * imagebuildah: don't leak image structs + * Add Dockerfiles for buildahimages + * Bump to Replace golang 1.10 with 1.12 + * add --dns* flags to buildah bud + * Add hack/build_speed.sh test speeds on building container images + * Create buildahimage Dockerfile for Quay + * rename 'is' to 'expect_output' + * squash.bats: test squashing in multi-layered builds + * bud.bats: test COPY --from in a Dockerfile while using the cache + * commit: make target image names optional + * Fix bud-args to allow comma separation + * oops, missed some tests in commit.bats + * new helper: expect_line_count + * New tests for #1467 (string slices in cmdline opts) + * Workarounds for dealing with travis; review feedback + * BATS tests - extensive but minor cleanup + * imagebuildah: defer pulling images for COPY --from + * imagebuildah: centralize COMMIT and image ID output + * Travis: do not use traviswait + * imagebuildah: only initialize imagebuilder configuration once per stage + * Make cleaner error on Dockerfile build errors + * unshare: move to pkg/ + * unshare: move some code from cmd/buildah/unshare + * Fix handling of Slices versus Arrays + * imagebuildah: reorganize stage and per-stage logic + * imagebuildah: add empty layers for instructions + * Add missing step in installing into Ubuntu + * fix bug in .dockerignore support + * imagebuildah: deduplicate prepended "FROM" instructions + * Touch up intro + * commit: set created-by to the shell if it isn't set + * commit: check that we always set a "created-by" + * docs/buildah.md: add "containers-" prefixes under "SEE ALSO" + * Bump back to 1.8-dev + +- Changelog for v1.7.2 (2019-03-28) + * mount: do not create automatically a namespace + * buildah: correctly create the userns if euid!=0 + * imagebuildah.Build: consolidate cleanup logic + * CommitOptions: drop the redundant Store field + * Move pkg/chrootuser from libpod to buildah. + * imagebuildah: record image IDs and references more often + * vendor imagebuilder v1.1.0 + * imagebuildah: fix requiresStart/noRunsRemaining confusion + * imagebuildah: check for unused args across stages + * bump github.com/containernetworking/cni to v0.7.0-rc2 + * imagebuildah: use "useCache" instead of "noCache" + * imagebuildah.resolveNameToImageRef(): take name as a parameter + * Export fields of the DockerIgnore struct + * imagebuildah: drop the duplicate containerIDs list + * rootless: by default use the host network namespace + * imagebuildah: split Executor and per-stage execution + * imagebuildah: move some fields around + * golint: make golint happy + * docs: 01-intro.md: add missing .
in Dockerfile examples + * fix bug using .dockerignore + * Do not create empty mounts.conf file + * images: suppress a spurious blank line with no images + * from: distinguish between ADD and COPY + * fix bug to not separate each --label value with comma + * buildah-bud.md: correct a typo, note a default + * Remove mistaken code that got merged in other PR + * add sample registries.conf to docs + * escape shell variables in README example + * slirp4netns: set mtu to 65520 + * images: imageReposToMap() already adds : + * imagebuildah.ReposToMap: move to cmd + * Build: resolve copyFrom references earlier + * Allow rootless users to use the cache directory in homedir + * bud.bats: use the per-test temp directory + * bud.bats: log output before counting length + * Simplify checks for leftover args + * Print commitID with --layers + * fix bug images use the template to print results + * rootless: honor --net host + * onsi/gomega add missing files + * vendor latest openshift/imagebuilder + * Remove noop from squash help + * Prepend a comment to files setup in container + * imagebuildah resolveSymlink: fix handling of relative links + * Errors should be printed to stderr + * Add recommends for slirp4netns and fuse-overlay + * Update pull and pull-always flags + * Hide from users command options that we don't want them to use. + * Update secrets fipsmode patch to work on rootless containers + * fix unshare option handling and documentation + * Vendor in latest containers/storage + * Hard-code docker.Transport use in pull --all-tags + * Use a types.ImageReference instead of (transport, name) strings in pullImage etc. + * Move the computation of srcRef before first pullAndFindImage + * Don't throw away user-specified tag for pull --all-tags + * CHANGES BEHAVIOR: Remove the string format input to localImageNameForReference + * Don't try to parse imageName as transport:image in pullImage + * Use reference.WithTag instead of manual string manipulation in Pull + * Don't pass image = transport:repo:tag, transport=transport to pullImage + * Fix confusing variable naming in Pull + * Don't try to parse image name as a transport:image + * Fix error reporting when parsing trans+image + * Remove 'transport == ""' handling from the pull path + * Clean up "pulls" of local image IDs / ID prefixes + * Simplify ExpandNames + * Document the semantics of transport+name returned by ResolveName + * Update gitvalidation epoch + * Bump back to 1.8-dev + +- Changelog for v1.7.1 (2019-02-26) + * vendor containers/image v1.5 + * Move secrets code from libpod into buildah + * Update CHANGELOG.md with the past changes + * README.md: fix typo + * Fix a few issues found by tests/validate/gometalinter.sh + * Neutralize buildah/unshare on non-Linux platforms + * Explicitly specify a directory to find(1) + * README.md: rephrase Buildah description + * Stop printing default twice in cli --help + * install.md: add section about vendoring + * Bump to 1.8-dev + +- Changelog for v1.7 (2019-02-21) + * vendor containers/image v1.4 + * Make "images --all" faster + * Remove a misleading comment + * Remove quiet option from pull options + * Make sure buildah pull --all-tags only works with docker transport + * Support oci layout format + * Fix pulling of images within buildah + * Fix tls-verify polarity + * Travis: execute make vendor and hack/tree_status.sh + * vendor.conf: remove unused dependencies + * add missing vendor/github.com/containers/libpod/vendor.conf + * vendor.conf: remove github.com/inconshreveable/mousetrap + * make
vendor: always fetch the latest vndr + * add hack/tree_status.sh script + * Bump c/Storage to 1.10 + * Add --all-tags test to pull + * mount: make error clearer + * Remove global flags from cli help + * Set --disable-compression to true as documented + * Help document using buildah mount in rootless mode + * healthcheck start-period: update documentation + * Vendor in latest c/storage and c/image + * dumpbolt: handle nested buckets + * Fix buildah commit compress by default + * Test on xenial, not trusty + * unshare: reexec using a memfd copy instead of the binary + * Add --target to bud command + * Fix example for setting multiple environment variables + * main: fix rootless mode + * buildah: force umask 022 + * pull.bats: specify registry config when using registries + * pull.bats: use the temporary directory, not /tmp + * unshare: do not set rootless mode if euid=0 + * Touch up cli help examples and a few nits + * Add an undocumented dumpbolt command + * Move tar commands into containers/storage + * Fix bud issue with 2 line Dockerfile + * Add package install descriptions + * Note configuration file requirements + * Replace urfave/cli with cobra + * cleanup vendor.conf + * Vendor in latest containers/storage + * Add Quiet to PullOptions and PushOptions + * cmd/commit: add flag omit-timestamp to allow for deterministic builds + * Add options for empty-layer history entries + * Make CLI help descriptions and usage a bit more consistent + * vndr opencontainers/selinux + * Bump baseline test Fedora to 29 + * Bump to v1.7-dev-1 + * Bump to v1.6-1 + * Add support for ADD --chown + * imagebuildah: make EnsureContainerPath() check/create the right one + * Bump 1.7-dev + * Fix contrib/rpm/buildah.spec changelog date + +- Changelog for v1.6-1 (2019-01-18) + * Add support for ADD --chown + * imagebuildah: make EnsureContainerPath() check/create the right one + * Fix contrib/rpm/buildah.spec changelog date + * Vendor in latest containers/storage + * Revendor everything + * Revendor in latest code by release + * unshare: do not set USER=root + * run: ignore EIO when flushing at the end, avoid double log + * build-using-dockerfile,commit: disable compression by default + * Update some comments + * Make rootless work under no_pivot_root + * Add CreatedAtRaw date field for use with Format + * Properly format images JSON output + * pull: add all-tags option + * Fix support for multiple Short options + * pkg/blobcache: add synchronization + * Skip empty files in file check of conformance test + * Use NoPivot also for RUN, not only for run + * Remove no longer used isReferenceInsecure / isRegistryInsecure + * Do not set OCIInsecureSkipTLSVerify based on registries.conf + * Remove duplicate entries from images JSON output + * vendor parallel-copy from containers/image + * blobcache.bats: adjust explicit push tests + * Handle one line Dockerfile with layers + * We should only warn if user actually requests Hostname be set in image + * Fix compiler Warning about comparing different size types + * imagebuildah: don't walk if rootdir and path are equal + * Add aliases for buildah containers, so buildah list, ls and ps work + * vendor: use faster version instead of compress/gzip + * vendor: update libpod + * Properly handle Hostname inside of RUN command + * docs: mention how to mount in rootless mode + * tests: use fully qualified name for centos image + * travis.yml: use the fully qualified name for alpine + * mount: allow mount only when using vfs + * Add some tests for buildah pull + * Touch up images -q
processing + * Refactor: Use library shared idtools.ParseIDMap() instead of bundling it + * bump GITVALIDATE_EPOCH + * cli.BudFlags: add `--platform` nop + * Makefile: allow packagers to more easily add tags + * Makefile: soften the requirement on git + * tests: add containers json test + * Inline blobCache.putBlob into blobCacheDestination.PutBlob + * Move saveStream and putBlob near blobCacheDestination.PutBlob + * Remove BlobCache.PutBlob + * Update for API changes + * Vendor c/image after merging c/image#536 + * Handle 'COPY --from' in Dockerfile + * Vendor in latest content from github.com/containers/storage + * Clarify docker.io default in push with docker-daemon + * Test blob caching + * Wire in a hidden --blob-cache option + * Use a blob cache when we're asked to use one + * Add --disable-compression to 'build-using-dockerfile' + * Add a blob cache implementation + * vendor: update containers/storage + * Update for sysregistriesv2 API changes + * Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53 + * clean up makefile variables + * Fix file permission + * Complete the instructions for the command + * Show warning when a build arg is not used + * Assume user 0 group 0, if /etc/passwd file in container. + * Add buildah info command + * Enable -q when --filter is used for images command + * Add v1.5 Release Announcement + * Fix dangling filter for images command + * Fix completions to print Names as well as IDs + * tests: Fix file permissions + * Bump 1.6-dev + +- Changelog for v1.5-1 (2018-11-21) + * Bump min go to 1.10 in install.md + * vendor: update ostree-go + * Update docker build command line in conformance test + * Print command in SystemExec as debug information + * Add some skip word for inspect check in conformance test + * Update regex for multi stage base test + * Sort CLI flags + * vendor: update containers/storage + * Add note to install about non-root on RHEL/CentOS + * Update imagebuilder dependency to support heading ARGs in Dockerfile + * rootless: do not specify --rootless to the OCI runtime + * Export resolvesymlink function + * Exclude --force-rm from common bud cli flags + * run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume + * rootless: use slirp4netns to setup the network namespace + * Instructions for completing the pull command + * Fix travis to not run environment variable patch + * rootless: only discard network configuration names + * run: only set up /etc/hosts or /etc/resolv.conf with network + * common: getFormat: match entire string not only the prefix + * vendor: update libpod + * Change validation EPOCH + * Fixing broken link for container-registries.conf + * Restore rootless isolation test for from volume ro test + * ostree: fix tag for build constraint + * Handle directories better in bud -f + * vndr in latest containers/storage + * Fix unshare gofmt issue + * runSetupBuiltinVolumes(): break up volume setup + * common: support a per-user registries conf file + * unshare: do not override the configuration + * common: honor the rootless configuration file + * unshare: create a new mount namespace + * unshare: support libpod rootless pkg + * Use libpod GetDefaultStorage to report proper storage config + * Allow container storage to manage the SELinux labels + * Resolve image names with default transport in from command + * run: When the value of isolation is set, use the set value instead of the default value.
+ * Vendor in latest containers/storage and opencontainers/selinux + * Remove no longer valid todo + * Check for empty buildTime in version + * Change gofmt so it runs on all but 1.10 + * Run gofmt only on Go 1.11 + * Walk symlinks when checking cached images for copied/added files + * ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder + * Set WorkingDir to empty, not / for conformance + * Update calls in e2e to address 1101 + * imagebuilder.BuildDockerfiles: return the image ID + * Update for changes in the containers/image API + * bump(github.com/containers/image) + * Allow setting --no-pivot default with an env var + * Add man page and bash completion, for --no-pivot + * Add the --no-pivot flag to the run command + * Improve reporting about individual pull failures + * Move the "short name but no search registries" error handling to resolveImage + * Return a "search registries were needed but empty" indication in util.ResolveName + * Simplify handling of the "tried to pull an image but found nothing" case in newBuilder + * Don't even invoke the pull loop if options.FromImage == "" + * Eliminate the long-running ref and img variables in resolveImage + * In resolveImage, return immediately on success + * Fix From As in Dockerfile + * Vendor latest containers/image + * Vendor in latest libpod + * Sort CLI flags of buildah bud + * Change from testing with golang 1.9 to 1.11. + * unshare: detect when unprivileged userns are disabled + * Optimize redundant code + * fix missing format param + * chroot: fix the args check + * imagebuildah: make ResolveSymLink public + * Update copy chown test + * buildah: use the same logic for XDG_RUNTIME_DIR as podman + * V1.4 Release Announcement + * Podman --privileged selinux is broken + * papr: mount source at gopath + * parse: Modify the return value + * parse: modify the verification of the isolation value + * Make sure we log or return every error + * pullImage(): when completing an image name, try docker:// + * Fix up Tutorial 3 to account for format + * Vendor in latest containers/storage and containers/image + * docs/tutorials/01-intro.md: enhanced installation instructions + * Enforce "blocked" for registries for the "docker" transport + * Correctly set DockerInsecureSkipTLSVerify when pulling images + * chroot: set up seccomp and capabilities after supplemental groups + * chroot: fix capabilities list setup and application + * .papr.yml: log the podman version + * namespaces.bats: fix handling of uidmap/gidmap options in pairs + * chroot: only create user namespaces when we know we need them + * Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS) + * bash/buildah: add isolation option to the from command + +- Changelog for v1.4 (2018-10-02) + * from: fix isolation option + * Touchup pull manpage + * Export buildah ReserveSELinuxLabels so podman can use it + * Add buildah.io to README.md and doc fixes + * Update rmi man for prune changes + * Ignore file not found removal error in bud + * bump(github.com/containers/{storage,image}) + * NewImageSource(): only create one Diff() at a time + * Copy ExposedPorts from base image into the config + * tests: run conformance test suite in Travis + * Change rmi --prune to not accept an imageID + * Clear intermediate container IDs after each stage + * Request podman version for build issues + * unshare: keep the additional groups of the user + * Builtin volumes should be owned by the UID/GID of the container + * Get rid of dangling whitespace in markdown files + * Move buildah from
projectatomic/buildah to containers/buildah + * nitpick: parse.validateFlags loop in bud cli + * bash: Completion options + * Add signature policy to push tests + * vendor in latest containers/image + * Fix grammar in Container Tools Guide + * Don't build btrfs if it is not installed + * new: Return image-pulling errors from resolveImage + * pull: Return image-pulling errors from pullImage + * Add more volume mount tests + * chroot: create missing parent directories for volume mounts + * Push: Allow an empty destination + * Add Podman relationship to readme, create container tools guide + * Fix arg usage in buildah-tag + * Add flags/arguments order verification to other commands + * Handle ErrDuplicateName errors from store.CreateContainer() + * Evaluate symbolic links on Add/Copy Commands + * Vendor in latest containers/image and containers/storage + * Retain bounding set when running containers as non root + * run container-diff tests in Travis + * buildah-images.md: Fix option contents + * push: show image digest after push succeeds + * Vendor in latest containers/storage,image,libpod and runc + * Change references to cri-o to point at new repository + * Exclude --layers from the common bud cli flags + * demos: Increase the executable permissions + * run: clear default seccomp filter if not enabled + * Bump maximum cyclomatic complexity to 45 + * stdin: on HUP, read everything + * nitpick: use tabs in tests/helpers.bash + * Add flags/arguments order verification to one arg commands + * nitpick: decrease cognitive complexity in buildah-bud + * rename: Avoid renaming the same name as other containers + * chroot isolation: chroot() before setting up seccomp + * Small nitpick at the "if" condition in tag.go + * cmd/images: Modify json option + * cmd/images: Disallow the input of image when using the -a option + * Fix examples to include context directory + * Update containers/image to fix commit layer issue + * cmd/containers: End loop early when using the json option + * Make buildah-from error message clear when flags are after arg + * Touch up README.md for conformance tests + * Update container/storage for lock fix + * cmd/rm: restore the correct containerID display + * Remove debug lines + * Remove docker build image after each test + * Add README for conformance test + * Update the MakeOptions to accept all command options for buildah + * Update regex to fit the docker output in test "run with JSON" + * cmd/buildah: Remove redundant variable declarations + * Warn about using Commands in Dockerfile that are not supported by OCI. + * Add buildah bud conformance test + * Fix rename to also change container name in builder + * Makefile: use $(GO) env-var everywhere + * Cleanup code to more closely match Docker Build images + * Document BUILDAH_* environment variables in buildah bud --help output + * Return error immediately if error occurs in Prepare step + * Fix --layers ADD from url issue + * Add "Sign your PRs" TOC item to contributing.md.
+ * Display the correct ID after deleting image + * rmi: Modify the handling of errors + * Let util.ResolveName() return parsing errors + * Explain Open Container Initiative (OCI) acronym, add link + * Update vendor for urfave/cli back to master + * Handle COPY --chown in Dockerfile + * Switch to Recommends container-selinux + * Update vendor for containernetworking, imagebuildah and podman + * Document STORAGE_DRIVER and STORAGE_OPTS environment variable + * Change references to projectatomic/libpod to containers/libpod + * Add container PATH retrieval example + * Expand variables names for --env + * imagebuildah: provide a way to provide stdin for RUN + * Remove an unused srcRef.NewImageSource in pullImage + * chroot: correct a comment + * chroot: bind mount an empty directory for masking + * Don't bother with --no-pivot for rootless isolation + * CentOS needs EPEL repo + * Export a Pull() function + * Remove stream options, since docker build does not have it + * release v1.3: mention openSUSE + * Add Release Announcements directory + * Bump to v1.4-dev + +- Changelog for v1.3 (2018-08-04) + * Revert pull error handling from 881 + * bud should not search context directory for Dockerfile + * Set BUILDAH_ISOLATION=rootless when running unprivileged + * .papr.sh: Also test with BUILDAH_ISOLATION=rootless + * Skip certain tests when we're using "rootless" isolation + * .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot + * Add and implement IsolationOCIRootless + * Add a value for IsolationOCIRootless + * Fix rmi to remove intermediate images associated with an image + * Return policy error on pull + * Update containers/image to 216acb1bcd2c1abef736ee322e17147ee2b7d76c + * Switch to github.com/containers/image/pkg/sysregistriesv2 + * unshare: make adjusting the OOM score optional + * Add flags validation + * chroot: handle raising process limits + * chroot: make the resource limits name map module-global + * Remove rpm.bats, we need to run this manually + * Set the default ulimits to match Docker + * buildah: no args is out of bounds + * unshare: error message missed the pid + * preprocess ".in" suffixed Dockerfiles + * Fix doubled "the" in buildah-config man page + * Only test rpmbuild on latest fedora + * Add support for multiple Short options + * Update to latest urfave/cli + * Add additional SELinux tests + * Vendor in latest github.com/containers/{image,storage} + * Stop testing with golang 1.8 + * Fix volume cache issue with buildah bud --layers + * Create buildah pull command + * Increase the deadline for gometalinter during 'make validate' + * .papr.sh: Also test with BUILDAH_ISOLATION=chroot + * .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot + * Add a Dockerfile + * Set BUILDAH_ISOLATION=chroot when running unprivileged + * Add and implement IsolationChroot + * Update github.com/opencontainers/runc + * maybeReexecUsingUserNamespace: add a default for root + * Allow ping command without NET_RAW Capabilities + * rmi.storageImageID: fix Wrapf format warning + * Allow Dockerfile content to come from stdin + * Vendor latest container/storage to fix overlay mountopt + * userns: assign additional IDs sequentially + * Remove default dev/pts + * Add OnBuild test to baseline test + * tests/run.bats(volumes): use :z when SELinux is enabled + * Avoid a stall in runCollectOutput() + * Use manifest from container/image + * Vendor in latest containers/image and containers/storage + * add rename command + * Completion command + * Update CHANGELOG.md + * Update
vendor for runc to fix 32 bit builds + * bash completion: remove shebang + * Update vendor for runc to fix 32 bit builds + +- Changelog for v1.2 (2018-07-14) + * Vendor in latest containers/image + * build-using-dockerfile: let -t include transports again + * Block use of /proc/acpi and /proc/keys from inside containers + * Fix handling of --registries-conf + * Fix becoming a maintainer link + * add optional CI test for darwin + * Don't pass a nil error to errors.Wrapf() + * image filter test: use kubernetes/pause as a "since" + * Add --cidfile option to from + * vendor: update containers/storage + * Contributors need to find the CONTRIBUTOR.md file more easily + * Add a --loglevel option to build-with-dockerfile + * Create Development plan + * cmd: Code improvement + * allow buildah cross compile for a darwin target + * Add unused function param lint check + * docs: Follow man-pages(7) suggestions for SYNOPSIS + * Start using github.com/seccomp/containers-golang + * umount: add all option to umount all mounted containers + * runConfigureNetwork(): remove an unused parameter + * Update github.com/opencontainers/selinux + * Fix buildah bud --layers + * Force ownership of /etc/hosts and /etc/resolv.conf to 0:0 + * main: if unprivileged, reexec in a user namespace + * Vendor in latest imagebuilder + * Reduce the complexity of the buildah.Run function + * mount: output it before replacing lastError + * Vendor in latest selinux-go code + * Implement basic recognition of the "--isolation" option + * Run(): try to resolve non-absolute paths using $PATH + * Run(): don't include any default environment variables + * build without seccomp + * vendor in latest runtime-tools + * bind/mount_unsupported.go: remove import errors + * Update github.com/opencontainers/runc + * Add Capabilities lists to BuilderInfo + * Tweaks for commit tests + * commit: recognize committing to second storage locations + * Fix ARGS parsing for run commands + * Add info on registries.conf to from manpage + * Switch from using docker to podman for testing in .papr + * buildah: set the HTTP User-Agent + * ONBUILD tutorial + * Add information about the configuration files to the install docs + * Makefile: add uninstall + * Add tilde info for push to troubleshooting + * mount: support multiple inputs + * Use the right formatting when adding entries to /etc/hosts + * Vendor in latest go-selinux bindings + * Allow --userns-uid-map/--userns-gid-map to be global options + * bind: factor out UnmountMountpoints + * Run(): simplify runCopyStdio() + * Run(): handle POLLNVAL results + * Run(): tweak terminal mode handling + * Run(): rename 'copyStdio' to 'copyPipes' + * Run(): don't set a Pdeathsig for the runtime + * Run(): add options for adding and removing capabilities + * Run(): don't use a callback when a slice will do + * setupSeccomp(): refactor + * Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers + * Escape use of '_' in .md docs + * Break out getProcIDMappings() + * Break out SetupIntermediateMountNamespace() + * Add Multi From Demo + * Use the c/image conversion code instead of converting configs manually + * Don't throw away the manifest MIME type and guess again + * Consolidate loading manifest and config in initConfig + * Pass a types.Image to Builder.initConfig + * Require an image ID in importBuilderDataFromImage + * Use c/image/manifest.GuessMIMEType instead of a custom heuristic + * Do not ignore any parsing errors in initConfig + * Explicitly handle "from scratch" images in Builder.initConfig + * Fix
parsing of OCI images + * Simplify dead but dangerous-looking error handling + * Don't ignore v2s1 history if docker_version is not set + * Add --rm and --force-rm to buildah bud + * Add --all,-a flag to buildah images + * Separate stdio buffering from writing + * Remove tty check from images --format + * Add environment variable BUILDAH_RUNTIME + * Add --layers and --no-cache to buildah bud + * Touch up images man + * version.md: fix DESCRIPTION + * tests: add containers test + * tests: add images test + * images: fix usage + * fix make clean error + * Change 'registries' to 'container registries' in man + * add commit test + * Add(): learn to record hashes of what we add + * Minor update to buildah config documentation for entrypoint + * Bump to v1.2-dev + * Add registries.conf link to a few man pages + +- Changelog for v1.1 (2018-06-08) + * Drop capabilities if running container processes as non root + * Print Warning message if cmd will not be used based on entrypoint + * Update 01-intro.md + * Shouldn't add insecure registries to list of search registries + * Report errors on bad transports specification when pushing images + * Move parsing code out of common for namespaces and into pkg/parse.go + * Add disable-content-trust noop flag to bud + * Change freenode chan to buildah + * runCopyStdio(): don't close stdin unless we saw POLLHUP + * Add registry errors for pull + * runCollectOutput(): just read until the pipes are closed on us + * Run(): provide redirection for stdio + * rmi, rm: add test + * add mount test + * Add parameter judgment for commands that do not require parameters + * Add context dir to bud command in baseline test + * run.bats: check that we can run with symlinks in the bundle path + * Give better messages to users when image can not be found + * use absolute path for bundlePath + * Add environment variable to buildah --format + * rm: add validation to args and all option + * Accept json array input for config entrypoint + * Run(): process RunOptions.Mounts, and its flags + * Run(): only collect error output from stdio pipes if we created some + * Add OnBuild support for Dockerfiles + * Quick fix on demo readme + * run: fix validate flags + * buildah bud should require a context directory or URL + * Touchup tutorial for run changes + * Validate common bud and from flags + * images: Error if the specified imagename does not exist + * inspect: Increase err judgments to avoid panic + * add test to inspect + * buildah bud picks up ENV from base image + * Extend the amount of time travis_wait should wait + * Add a make target for Installing CNI plugins + * Add tests for namespace control flags + * copy.bats: check ownerships in the container + * Fix SELinux test errors when SELinux is enabled + * Add example CNI configurations + * Run: set supplemental group IDs + * Run: use a temporary mount namespace + * Use CNI to configure container networks + * add/secrets/commit: Use mappings when setting permissions on added content + * Add CLI options for specifying namespace and cgroup setup + * Always set mappings when using user namespaces + * Run(): break out creation of stdio pipe descriptors + * Read UID/GID mapping information from containers and images + * Additional bud CI tests + * Run integration tests under travis_wait in Travis + * build-using-dockerfile: add --annotation + * Implement --squash for build-using-dockerfile and commit + * Vendor in latest container/storage for devicemapper support + * add test to inspect + * Vendor github.com/onsi/ginkgo and 
github.com/onsi/gomega + * Test with Go 1.10, too + * Add console syntax highlighting to troubleshooting page + * bud.bats: print "$output" before checking its contents + * Manage "Run" containers more closely + * Break Builder.Run()'s "run runc" bits out + * util.ResolveName(): handle completion for tagged/digested image names + * Handle /etc/hosts and /etc/resolv.conf properly in container + * Documentation fixes + * Make it easier to parse our temporary directory as an image name + * Makefile: list new pkg/ subdirectories as dependencies for buildah + * containerImageSource: return more-correct errors + * API cleanup: PullPolicy and TerminalPolicy should be types + * Make "run --terminal" and "run -t" aliases for "run --tty" + * Vendor github.com/containernetworking/cni v0.6.0 + * Update github.com/containers/storage + * Update github.com/projectatomic/libpod + * Add support for buildah bud --label + * buildah push/from can push and pull images with no reference + * Vendor in latest containers/image + * Update gometalinter to fix install.tools error + * Update troubleshooting with new run workaround + * Added a bud demo and tidied up + * Attempt to download file from url; if that fails, assume Dockerfile + * Add buildah bud CI tests for ENV variables + * Re-enable rpm .spec version check and new commit test + * Update buildah scratch demo to support el7 + * Added Docker compatibility demo + * Update to F28 and new run format in baseline test + * Touchup man page short options across man pages + * Added demo dir and a demo. changed distrorelease + * builder-inspect: fix format option + * Add cpu-shares short flag (-c) and cpu-shares CI tests + * Minor fixes to formatting in rpm spec changelog + * Fix rpm .spec changelog formatting + * CI tests and minor fix for cache related noop flags + * buildah-from: add effective value to mount propagation + +- Changelog for v1.0 (2018-05-06) + * Declare Buildah 1.0 + * Add cache-from and no-cache noops, and fix doco + * Update option and documentation for --force-rm + * Adding noop for --force-rm to match --rm + * Add buildah bud ENTRYPOINT,CMD,RUN tests + * Adding buildah bud RUN test scenarios + * Extend tests for empty buildah run command + * Fix formatting error in run.go + * Update buildah run to make command required + * Expanding buildah run cmd/entrypoint tests + * Update test cases for buildah run behaviour + * Remove buildah run cmd and entrypoint execution + * Add Files section with registries.conf to pertinent man pages + * tests/config: perfect test + * tests/from: add name test + * Do not print directly to stdout in Commit() + * Touch up auth test commands + * Force "localhost" as a default registry + * Drop util.GetLocalTime() + * Vendor in latest containers/image + * Validate host and container paths passed to --volume + * test/from: add add-host test + * Add --compress, --rm, --squash flags as a noop for bud + * Add FIPS mode secret to buildah run and bud + * Add config --comment/--domainname/--history-comment/--hostname + * 'buildah config': stop replacing Created-By whenever it's not specified + * Modify man pages so they compile correctly in mandb + * Add description on how to do --isolation to buildah-bud man page + * Add support for --iidfile to bud and commit + * Refactor buildah bud for vendoring + * Fail if date or git not installed + * Revert update of entrypoint behaviour to match docker + * Vendor in latest imagebuilder code to fix multiple stage builds + * Add /bin/sh -c to entrypoint in config + * image_test: Improve
the test + * Fix README example of buildah config + * buildah-image: add validation to 'format' + * Simple changes to allow buildah to pass make validate + * Clarify the use of buildah config options + * containers_test: Perfect testing + * buildah images and podman images are listing different sizes + * buildah-containers: add tests and example to the man page + * buildah-containers: add validation to 'format' + * Clarify the use of buildah config options + * Minor fix for lighttpd example in README + * Add tls-verification to troubleshooting + * Modify buildah rmi to account for changes in containers/storage + * Vendor in latest containers/image and containers/storage + * addcopy: add src validation + * Remove tarball as an option from buildah push --help + * Fix secrets patch + * Update entrypoint behaviour to match docker + * Display imageId after commit + * config: add support for StopSignal + * Fix docker login issue in travis.yml + * Allow referencing stages as index and names + * Add multi-stage builds tests + * Add multi-stage builds support + * Add accessor functions for comment and stop signal + * Vendor in latest imagebuilder, to get mixed case AS support + * Allow umount to have multi-containers + * Update buildah push doc + * buildah bud walks symlinks + * Imagename is required for commit atm, update manpage + +- Changelog for v0.16.0 (2018-04-08) + * Bump to v0.16.0 + * Remove requires for ostree-lib in rpm spec file + * Add support for shell + * buildah.spec should require ostree-libs + * Vendor in latest containers/image + * bash: prefer options + * Change image time to locale, add troubleshooting.md, add logo to other mds + * buildah-run.md: fix error SYNOPSIS + * docs: fix error example + * Allow --cmd parameter to have commands as values + * Touchup README to re-enable logo + * Clean up README.md + * Make default-mounts-file a hidden option + * Document the mounts.conf file + * Fix man pages to format correctly + * Add various transport support to buildah from + * Add unit tests to run.go + * If the user overrides the storage driver, the options should be dropped + * Show Config/Manifest as JSON string in inspect when format is not set + * Switch which for that in README.md + * Remove COPR + * Fix wrong order of parameters + * Vendor in latest containers/image + * Remove shallowCopy(), which shouldn't be saving us time any more + * shallowCopy: avoid a second read of the container's layer diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go new file mode 100644 index 00000000000..dcfbd0f2474 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -0,0 +1,1421 @@ +// +build linux + +package chroot + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/containers/buildah/bind" + "github.com/containers/buildah/copier" + "github.com/containers/buildah/util" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" + "golang.org/x/term" +) + +const ( + // 
runUsingChrootCommand is a command we use as a key for reexec + runUsingChrootCommand = "buildah-chroot-runtime" + // runUsingChrootExec is a command we use as a key for reexec + runUsingChrootExecCommand = "buildah-chroot-exec" +) + +var ( + rlimitsMap = map[string]int{ + "RLIMIT_AS": unix.RLIMIT_AS, + "RLIMIT_CORE": unix.RLIMIT_CORE, + "RLIMIT_CPU": unix.RLIMIT_CPU, + "RLIMIT_DATA": unix.RLIMIT_DATA, + "RLIMIT_FSIZE": unix.RLIMIT_FSIZE, + "RLIMIT_LOCKS": unix.RLIMIT_LOCKS, + "RLIMIT_MEMLOCK": unix.RLIMIT_MEMLOCK, + "RLIMIT_MSGQUEUE": unix.RLIMIT_MSGQUEUE, + "RLIMIT_NICE": unix.RLIMIT_NICE, + "RLIMIT_NOFILE": unix.RLIMIT_NOFILE, + "RLIMIT_NPROC": unix.RLIMIT_NPROC, + "RLIMIT_RSS": unix.RLIMIT_RSS, + "RLIMIT_RTPRIO": unix.RLIMIT_RTPRIO, + "RLIMIT_RTTIME": unix.RLIMIT_RTTIME, + "RLIMIT_SIGPENDING": unix.RLIMIT_SIGPENDING, + "RLIMIT_STACK": unix.RLIMIT_STACK, + } + rlimitsReverseMap = map[int]string{} +) + +func init() { + reexec.Register(runUsingChrootCommand, runUsingChrootMain) + reexec.Register(runUsingChrootExecCommand, runUsingChrootExecMain) + for limitName, limitNumber := range rlimitsMap { + rlimitsReverseMap[limitNumber] = limitName + } +} + +type runUsingChrootSubprocOptions struct { + Spec *specs.Spec + BundlePath string + UIDMappings []syscall.SysProcIDMap + GIDMappings []syscall.SysProcIDMap +} + +type runUsingChrootExecSubprocOptions struct { + Spec *specs.Spec + BundlePath string +} + +// RunUsingChroot runs a chrooted process, using some of the settings from the +// passed-in spec, and using the specified bundlePath to hold temporary files, +// directories, and mountpoints. +func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) { + var confwg sync.WaitGroup + var homeFound bool + for _, env := range spec.Process.Env { + if strings.HasPrefix(env, "HOME=") { + homeFound = true + break + } + } + if !homeFound { + spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOME=%s", homeDir)) + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Write the runtime configuration, mainly for debugging. + specbytes, err := json.Marshal(spec) + if err != nil { + return err + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return errors.Wrapf(err, "error storing runtime configuration") + } + logrus.Debugf("config = %v", string(specbytes)) + + // Default to using stdin/stdout/stderr if we weren't passed objects to use. + if stdin == nil { + stdin = os.Stdin + } + if stdout == nil { + stdout = os.Stdout + } + if stderr == nil { + stderr = os.Stderr + } + + // Create a pipe for passing configuration down to the next process. + preader, pwriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating configuration pipe") + } + config, conferr := json.Marshal(runUsingChrootSubprocOptions{ + Spec: spec, + BundlePath: bundlePath, + }) + if conferr != nil { + return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingChrootCommand) + } + + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. 
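The configuration pipe set up above is the pattern this file uses wherever one process hands options to the next: the parent marshals its options to JSON, passes the read end of a pipe to the reexec'd child as descriptor 3 via ExtraFiles, and the child decodes it from os.NewFile(3, ...). A minimal standalone sketch of that pattern (Linux-only, like the file it annotates), re-invoking /proc/self/exe directly instead of going through the containers/storage reexec package; childOpts and CHROOT_SKETCH_CHILD are illustrative names, not part of buildah:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"
    	"os/exec"
    )

    // childOpts is a stand-in for runUsingChrootSubprocOptions.
    type childOpts struct {
    	BundlePath string
    }

    func main() {
    	// Child branch: decode options from inherited descriptor 3, the way
    	// runUsingChrootMain does with os.NewFile(3, "confpipe").
    	if os.Getenv("CHROOT_SKETCH_CHILD") == "1" {
    		confPipe := os.NewFile(3, "confpipe")
    		defer confPipe.Close()
    		var opts childOpts
    		if err := json.NewDecoder(confPipe).Decode(&opts); err != nil {
    			fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
    			os.Exit(1)
    		}
    		fmt.Printf("child got BundlePath=%q\n", opts.BundlePath)
    		return
    	}

    	// Parent branch: hand the read end of a pipe to the child as fd 3 and
    	// write the JSON-encoded options from a goroutine while the child runs.
    	preader, pwriter, err := os.Pipe()
    	if err != nil {
    		panic(err)
    	}
    	cmd := exec.Command("/proc/self/exe")
    	cmd.Env = append(os.Environ(), "CHROOT_SKETCH_CHILD=1")
    	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    	cmd.ExtraFiles = []*os.File{preader} // the first extra file becomes fd 3
    	go func() {
    		json.NewEncoder(pwriter).Encode(childOpts{BundlePath: "/tmp/bundle"})
    		pwriter.Close()
    	}()
    	if err := cmd.Run(); err != nil {
    		panic(err)
    	}
    }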
+ if spec.Process.Terminal && term.IsTerminal(unix.Stdin) { + state, err := term.MakeRaw(unix.Stdin) + if err != nil { + logrus.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = term.Restore(unix.Stdin, state); err != nil { + logrus.Errorf("unable to restore terminal state: %v", err) + } + }() + } + } + + // Raise any resource limits that are higher than they are now, before + // we drop any more privileges. + if err = setRlimits(spec, false, true); err != nil { + return err + } + + // Start the grandparent subprocess. + cmd := unshare.Command(runUsingChrootCommand) + cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr + cmd.Dir = "/" + cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + pwriter.Close() + confwg.Done() + }() + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) + err = cmd.Run() + confwg.Wait() + if err == nil { + return conferr + } + return err +} + +// main() for grandparent subprocess. Its main job is to shuttle stdio back +// and forth, managing a pseudo-terminal if we want one, for our child, the +// parent subprocess. +func runUsingChrootMain() { + var options runUsingChrootSubprocOptions + + runtime.LockOSThread() + + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + os.Unsetenv("LOGLEVEL") + } + + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + + if options.Spec == nil || options.Spec.Process == nil { + fmt.Fprintf(os.Stderr, "invalid options spec in runUsingChrootMain\n") + os.Exit(1) + } + + // Prepare to shuttle stdio back and forth. + rootUID32, rootGID32, err := util.GetHostRootIDs(options.Spec) + if err != nil { + logrus.Errorf("error determining ownership for container stdio") + os.Exit(1) + } + rootUID := int(rootUID32) + rootGID := int(rootGID32) + relays := make(map[int]int) + closeOnceRunning := []*os.File{} + var ctty *os.File + var stdin io.Reader + var stdinCopy io.WriteCloser + var stdout io.Writer + var stderr io.Writer + fdDesc := make(map[int]string) + if options.Spec.Process.Terminal { + // Create a pseudo-terminal -- open a copy of the master side. + ptyMasterFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600) + if err != nil { + logrus.Errorf("error opening PTY master using /dev/ptmx: %v", err) + os.Exit(1) + } + // Set the kernel's lock to "unlocked". + locked := 0 + if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 { + logrus.Errorf("error unlocking PTY descriptor: %v", err) + os.Exit(1) + } + // Get a handle for the other end. + ptyFd, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCGPTPEER, unix.O_RDWR|unix.O_NOCTTY) + if int(ptyFd) == -1 { + if errno, isErrno := err.(syscall.Errno); !isErrno || (errno != syscall.EINVAL && errno != syscall.ENOTTY) { + logrus.Errorf("error getting PTY descriptor: %v", err) + os.Exit(1) + } + // EINVAL means the kernel's too old to understand TIOCGPTPEER. Try TIOCGPTN. 
+ ptyN, err := unix.IoctlGetInt(ptyMasterFd, unix.TIOCGPTN) + if err != nil { + logrus.Errorf("error getting PTY number: %v", err) + os.Exit(1) + } + ptyName := fmt.Sprintf("/dev/pts/%d", ptyN) + fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620) + if err != nil { + logrus.Errorf("error opening PTY %q: %v", ptyName, err) + os.Exit(1) + } + ptyFd = uintptr(fd) + } + // Make notes about what's going where. + relays[ptyMasterFd] = unix.Stdout + relays[unix.Stdin] = ptyMasterFd + fdDesc[ptyMasterFd] = "container terminal" + fdDesc[unix.Stdin] = "stdin" + fdDesc[unix.Stdout] = "stdout" + winsize := &unix.Winsize{} + // Set the pseudoterminal's size to the configured size, or our own. + if options.Spec.Process.ConsoleSize != nil { + // Use configured sizes. + winsize.Row = uint16(options.Spec.Process.ConsoleSize.Height) + winsize.Col = uint16(options.Spec.Process.ConsoleSize.Width) + } else { + if term.IsTerminal(unix.Stdin) { + // Use the size of our terminal. + winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ) + if err != nil { + logrus.Debugf("error reading current terminal's size") + winsize.Row = 0 + winsize.Col = 0 + } + } + } + if winsize.Row != 0 && winsize.Col != 0 { + if err = unix.IoctlSetWinsize(int(ptyFd), unix.TIOCSWINSZ, winsize); err != nil { + logrus.Warnf("error setting terminal size for pty") + } + // FIXME - if we're connected to a terminal, we should + // be passing the updated terminal size down when we + // receive a SIGWINCH. + } + // Open an *os.File object that we can pass to our child. + ctty = os.NewFile(ptyFd, "/dev/tty") + // Set ownership for the PTY. + if err = ctty.Chown(rootUID, rootGID); err != nil { + var cttyInfo unix.Stat_t + err2 := unix.Fstat(int(ptyFd), &cttyInfo) + from := "" + op := "setting" + if err2 == nil { + op = "changing" + from = fmt.Sprintf("from %d/%d ", cttyInfo.Uid, cttyInfo.Gid) + } + logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUID, rootGID, err) + } + // Set permissions on the PTY. + if err = ctty.Chmod(0620); err != nil { + logrus.Errorf("error setting permissions of container PTY: %v", err) + os.Exit(1) + } + // Make a note that our child (the parent subprocess) should + // have the PTY connected to its stdio, and that we should + // close it once it's running. + stdin = ctty + stdout = ctty + stderr = ctty + closeOnceRunning = append(closeOnceRunning, ctty) + } else { + // Create pipes for stdio. + stdinRead, stdinWrite, err := os.Pipe() + if err != nil { + logrus.Errorf("error opening pipe for stdin: %v", err) + } + stdoutRead, stdoutWrite, err := os.Pipe() + if err != nil { + logrus.Errorf("error opening pipe for stdout: %v", err) + } + stderrRead, stderrWrite, err := os.Pipe() + if err != nil { + logrus.Errorf("error opening pipe for stderr: %v", err) + } + // Make notes about what's going where. + relays[unix.Stdin] = int(stdinWrite.Fd()) + relays[int(stdoutRead.Fd())] = unix.Stdout + relays[int(stderrRead.Fd())] = unix.Stderr + fdDesc[int(stdinWrite.Fd())] = "container stdin pipe" + fdDesc[int(stdoutRead.Fd())] = "container stdout pipe" + fdDesc[int(stderrRead.Fd())] = "container stderr pipe" + fdDesc[unix.Stdin] = "stdin" + fdDesc[unix.Stdout] = "stdout" + fdDesc[unix.Stderr] = "stderr" + // Set ownership for the pipes. 
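The window-size handling above reduces to two ioctl wrappers from golang.org/x/sys/unix, which this file already imports. A short sketch; resizePty is an illustrative name, not part of this file:

    // resizePty copies the dimensions of the terminal on our stdin onto ptyFd.
    // A complete implementation would rerun this from a SIGWINCH handler, which
    // the FIXME above notes is not done here.
    func resizePty(ptyFd int) error {
    	winsize, err := unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ)
    	if err != nil {
    		return err
    	}
    	return unix.IoctlSetWinsize(ptyFd, unix.TIOCSWINSZ, winsize)
    }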
+ if err = stdinRead.Chown(rootUID, rootGID); err != nil { + logrus.Errorf("error setting ownership of container stdin pipe: %v", err) + os.Exit(1) + } + if err = stdoutWrite.Chown(rootUID, rootGID); err != nil { + logrus.Errorf("error setting ownership of container stdout pipe: %v", err) + os.Exit(1) + } + if err = stderrWrite.Chown(rootUID, rootGID); err != nil { + logrus.Errorf("error setting ownership of container stderr pipe: %v", err) + os.Exit(1) + } + // Make a note that our child (the parent subprocess) should + // have the pipes connected to its stdio, and that we should + // close its ends of them once it's running. + stdin = stdinRead + stdout = stdoutWrite + stderr = stderrWrite + closeOnceRunning = append(closeOnceRunning, stdinRead, stdoutWrite, stderrWrite) + stdinCopy = stdinWrite + defer stdoutRead.Close() + defer stderrRead.Close() + } + for readFd, writeFd := range relays { + if err := unix.SetNonblock(readFd, true); err != nil { + logrus.Errorf("error setting descriptor %d (%s) non-blocking: %v", readFd, fdDesc[readFd], err) + return + } + if err := unix.SetNonblock(writeFd, false); err != nil { + logrus.Errorf("error setting descriptor %d (%s) blocking: %v", relays[writeFd], fdDesc[writeFd], err) + return + } + } + if err := unix.SetNonblock(relays[unix.Stdin], true); err != nil { + logrus.Errorf("error setting %d to nonblocking: %v", relays[unix.Stdin], err) + } + go func() { + buffers := make(map[int]*bytes.Buffer) + for _, writeFd := range relays { + buffers[writeFd] = new(bytes.Buffer) + } + pollTimeout := -1 + stdinClose := false + for len(relays) > 0 { + fds := make([]unix.PollFd, 0, len(relays)) + for fd := range relays { + fds = append(fds, unix.PollFd{Fd: int32(fd), Events: unix.POLLIN | unix.POLLHUP}) + } + _, err := unix.Poll(fds, pollTimeout) + if !util.LogIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) { + return + } + removeFds := make(map[int]struct{}) + for _, rfd := range fds { + if rfd.Revents&unix.POLLHUP == unix.POLLHUP { + removeFds[int(rfd.Fd)] = struct{}{} + } + if rfd.Revents&unix.POLLNVAL == unix.POLLNVAL { + logrus.Debugf("error polling descriptor %s: closed?", fdDesc[int(rfd.Fd)]) + removeFds[int(rfd.Fd)] = struct{}{} + } + if rfd.Revents&unix.POLLIN == 0 { + if stdinClose && stdinCopy == nil { + continue + } + continue + } + b := make([]byte, 8192) + nread, err := unix.Read(int(rfd.Fd), b) + util.LogIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err)) + if nread > 0 { + if wfd, ok := relays[int(rfd.Fd)]; ok { + nwritten, err := buffers[wfd].Write(b[:nread]) + if err != nil { + logrus.Debugf("buffer: %v", err) + continue + } + if nwritten != nread { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nread, nwritten) + continue + } + } + // If this is the last of the data we'll be able to read + // from this descriptor, read as much as there is to read. 
+ for rfd.Revents&unix.POLLHUP == unix.POLLHUP { + nr, err := unix.Read(int(rfd.Fd), b) + util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err)) + if nr <= 0 { + break + } + if wfd, ok := relays[int(rfd.Fd)]; ok { + nwritten, err := buffers[wfd].Write(b[:nr]) + if err != nil { + logrus.Debugf("buffer: %v", err) + break + } + if nwritten != nr { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) + break + } + } + } + } + if nread == 0 { + removeFds[int(rfd.Fd)] = struct{}{} + } + } + pollTimeout = -1 + for wfd, buffer := range buffers { + if buffer.Len() > 0 { + nwritten, err := unix.Write(wfd, buffer.Bytes()) + util.LogIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err)) + if nwritten >= 0 { + _ = buffer.Next(nwritten) + } + } + if buffer.Len() > 0 { + pollTimeout = 100 + } + if wfd == relays[unix.Stdin] && stdinClose && buffer.Len() == 0 { + stdinCopy.Close() + delete(relays, unix.Stdin) + } + } + for rfd := range removeFds { + if rfd == unix.Stdin { + buffer, found := buffers[relays[unix.Stdin]] + if found && buffer.Len() > 0 { + stdinClose = true + continue + } + } + if !options.Spec.Process.Terminal && rfd == unix.Stdin { + stdinCopy.Close() + } + delete(relays, rfd) + } + } + }() + + // Set up mounts and namespaces, and run the parent subprocess. + status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning) + if err != nil { + fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err) + os.Exit(1) + } + + // Pass the process's exit status back to the caller by exiting with the same status. + if status.Exited() { + if status.ExitStatus() != 0 { + fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", status.ExitStatus()) + } + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", status.Signal()) + os.Exit(1) + } +} + +// runUsingChroot, still in the grandparent process, sets up various bind +// mounts and then runs the parent process in its own user namespace with the +// necessary ID mappings. +func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io.Reader, stdout, stderr io.Writer, closeOnceRunning []*os.File) (wstatus unix.WaitStatus, err error) { + var confwg sync.WaitGroup + + // Create a new mount namespace for ourselves and bind mount everything to a new location. + undoIntermediates, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) + if err != nil { + return 1, err + } + defer func() { + if undoErr := undoIntermediates(); undoErr != nil { + logrus.Debugf("error cleaning up intermediate mount NS: %v", err) + } + }() + + // Bind mount in our filesystems. + undoChroots, err := setupChrootBindMounts(spec, bundlePath) + if err != nil { + return 1, err + } + defer func() { + if undoErr := undoChroots(); undoErr != nil { + logrus.Debugf("error cleaning up intermediate chroot bind mounts: %v", err) + } + }() + + // Create a pipe for passing configuration down to the next process. + preader, pwriter, err := os.Pipe() + if err != nil { + return 1, errors.Wrapf(err, "error creating configuration pipe") + } + config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{ + Spec: spec, + BundlePath: bundlePath, + }) + if conferr != nil { + fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q", runUsingChrootExecCommand) + os.Exit(1) + } + + // Apologize for the namespace configuration that we're about to ignore. 
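Both here and at the end of the grandparent's main() above, the child's wait status is unpacked and mirrored so that callers see the same exit code or signal the innermost process produced. That plumbing, distilled into one helper (propagateExitStatus is an illustrative name; it assumes the fmt, os, os/exec, and syscall imports at the top of this file and Linux wait semantics):

    // propagateExitStatus makes this process exit the way its child did: with
    // the child's exit code if it exited, or with status 1 (after reporting the
    // signal) if the child was killed by a signal.
    func propagateExitStatus(err error) {
    	if err == nil {
    		os.Exit(0)
    	}
    	if exitError, ok := err.(*exec.ExitError); ok {
    		if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
    			switch {
    			case waitStatus.Exited():
    				os.Exit(waitStatus.ExitStatus())
    			case waitStatus.Signaled():
    				fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal())
    				os.Exit(1)
    			}
    		}
    	}
    	fmt.Fprintf(os.Stderr, "process exited with error: %v\n", err)
    	os.Exit(1)
    }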
+ logNamespaceDiagnostics(spec) + + // If we have configured ID mappings, set them here so that they can apply to the child. + hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") + if err != nil { + return 1, err + } + uidmap, gidmap := spec.Linux.UIDMappings, spec.Linux.GIDMappings + if len(uidmap) == 0 { + // No UID mappings are configured for the container. Borrow our parent's mappings. + uidmap = append([]specs.LinuxIDMapping{}, hostUidmap...) + for i := range uidmap { + uidmap[i].HostID = uidmap[i].ContainerID + } + } + if len(gidmap) == 0 { + // No GID mappings are configured for the container. Borrow our parent's mappings. + gidmap = append([]specs.LinuxIDMapping{}, hostGidmap...) + for i := range gidmap { + gidmap[i].HostID = gidmap[i].ContainerID + } + } + + // Start the parent subprocess. + cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) + cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr + cmd.Dir = "/" + cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + cmd.UnshareFlags = syscall.CLONE_NEWUTS | syscall.CLONE_NEWNS + requestedUserNS := false + for _, ns := range spec.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + requestedUserNS = true + } + } + if len(spec.Linux.UIDMappings) > 0 || len(spec.Linux.GIDMappings) > 0 || requestedUserNS { + cmd.UnshareFlags = cmd.UnshareFlags | syscall.CLONE_NEWUSER + cmd.UidMappings = uidmap + cmd.GidMappings = gidmap + cmd.GidMappingsEnableSetgroups = true + } + if ctty != nil { + cmd.Setsid = true + cmd.Ctty = ctty + } + cmd.OOMScoreAdj = spec.Process.OOMScoreAdj + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) + cmd.Hook = func(int) error { + for _, f := range closeOnceRunning { + f.Close() + } + return nil + } + + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + pwriter.Close() + confwg.Done() + }() + err = cmd.Run() + confwg.Wait() + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + if waitStatus.ExitStatus() != 0 { + fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus()) + } + os.Exit(waitStatus.ExitStatus()) + } else if waitStatus.Signaled() { + fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal()) + os.Exit(1) + } + } + } + fmt.Fprintf(os.Stderr, "process exited with error: %v", err) + os.Exit(1) + } + + return 0, nil +} + +// main() for parent subprocess. Its main job is to try to make our +// environment look like the one described by the runtime configuration blob, +// and then launch the intended command as a child. +func runUsingChrootExecMain() { + args := os.Args[1:] + var options runUsingChrootExecSubprocOptions + var err error + + runtime.LockOSThread() + + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + os.Unsetenv("LOGLEVEL") + } + + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + + // Set the hostname. 
We're already in a distinct UTS namespace and are admins in the user + // namespace which created it, so we shouldn't get a permissions error, but seccomp policy + // might deny our attempt to call sethostname() anyway, so log a debug message for that. + if options.Spec == nil || options.Spec.Process == nil { + fmt.Fprintf(os.Stderr, "invalid options spec passed in\n") + os.Exit(1) + } + + if options.Spec.Hostname != "" { + if err := unix.Sethostname([]byte(options.Spec.Hostname)); err != nil { + logrus.Debugf("failed to set hostname %q for process: %v", options.Spec.Hostname, err) + } + } + + // Try to chroot into the root. Do this before we potentially block the syscall via the + // seccomp profile. + var oldst, newst unix.Stat_t + if err := unix.Stat(options.Spec.Root.Path, &oldst); err != nil { + fmt.Fprintf(os.Stderr, "error stat()ing intended root directory %q: %v\n", options.Spec.Root.Path, err) + os.Exit(1) + } + if err := unix.Chdir(options.Spec.Root.Path); err != nil { + fmt.Fprintf(os.Stderr, "error chdir()ing to intended root directory %q: %v\n", options.Spec.Root.Path, err) + os.Exit(1) + } + if err := unix.Chroot(options.Spec.Root.Path); err != nil { + fmt.Fprintf(os.Stderr, "error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err) + os.Exit(1) + } + if err := unix.Stat("/", &newst); err != nil { + fmt.Fprintf(os.Stderr, "error stat()ing current root directory: %v\n", err) + os.Exit(1) + } + if oldst.Dev != newst.Dev || oldst.Ino != newst.Ino { + fmt.Fprintf(os.Stderr, "unknown error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err) + os.Exit(1) + } + logrus.Debugf("chrooted into %q", options.Spec.Root.Path) + + // not doing because it's still shared: creating devices + // not doing because it's not applicable: setting annotations + // not doing because it's still shared: setting sysctl settings + // not doing because cgroupfs is read only: configuring control groups + // -> this means we can use the freezer to make sure there aren't any lingering processes + // -> this means we ignore cgroups-based controls + // not doing because we don't set any in the config: running hooks + // not doing because we don't set it in the config: setting rootfs read-only + // not doing because we don't set it in the config: setting rootfs propagation + logrus.Debugf("setting apparmor profile") + if err = setApparmorProfile(options.Spec); err != nil { + fmt.Fprintf(os.Stderr, "error setting apparmor profile for process: %v\n", err) + os.Exit(1) + } + if err = setSelinuxLabel(options.Spec); err != nil { + fmt.Fprintf(os.Stderr, "error setting SELinux label for process: %v\n", err) + os.Exit(1) + } + + logrus.Debugf("setting resource limits") + if err = setRlimits(options.Spec, false, false); err != nil { + fmt.Fprintf(os.Stderr, "error setting process resource limits for process: %v\n", err) + os.Exit(1) + } + + // Try to change to the directory. + cwd := options.Spec.Process.Cwd + if !filepath.IsAbs(cwd) { + cwd = "/" + cwd + } + cwd = filepath.Clean(cwd) + if err := unix.Chdir("/"); err != nil { + fmt.Fprintf(os.Stderr, "error chdir()ing into new root directory %q: %v\n", options.Spec.Root.Path, err) + os.Exit(1) + } + if err := unix.Chdir(cwd); err != nil { + fmt.Fprintf(os.Stderr, "error chdir()ing into directory %q under root %q: %v\n", cwd, options.Spec.Root.Path, err) + os.Exit(1) + } + logrus.Debugf("changed working directory to %q", cwd) + + // Drop privileges. 
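Before the privilege drop that follows, note the shape of the chroot sequence above: stat the target, chdir into it, chroot, then stat "/" and compare device and inode numbers to confirm the root really changed. A distilled sketch under the same unix and errors imports as this file; enterRoot is an illustrative name, not part of buildah:

    // enterRoot chroots into path and verifies, by comparing device and inode
    // numbers, that "/" afterwards really is the requested directory.
    func enterRoot(path string) error {
    	var oldst, newst unix.Stat_t
    	if err := unix.Stat(path, &oldst); err != nil {
    		return errors.Wrapf(err, "error examining intended root %q", path)
    	}
    	if err := unix.Chdir(path); err != nil {
    		return errors.Wrapf(err, "error entering %q", path)
    	}
    	if err := unix.Chroot(path); err != nil {
    		return errors.Wrapf(err, "error chrooting into %q", path)
    	}
    	if err := unix.Stat("/", &newst); err != nil {
    		return errors.Wrapf(err, "error examining new root")
    	}
    	if oldst.Dev != newst.Dev || oldst.Ino != newst.Ino {
    		return errors.Errorf("new root is not %q", path)
    	}
    	// Start from a known working directory under the new root.
    	return unix.Chdir("/")
    }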
+ user := options.Spec.Process.User + if len(user.AdditionalGids) > 0 { + gids := make([]int, len(user.AdditionalGids)) + for i := range user.AdditionalGids { + gids[i] = int(user.AdditionalGids[i]) + } + logrus.Debugf("setting supplemental groups") + if err = syscall.Setgroups(gids); err != nil { + fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v", err) + os.Exit(1) + } + } else { + setgroups, _ := ioutil.ReadFile("/proc/self/setgroups") + if strings.Trim(string(setgroups), "\n") != "deny" { + logrus.Debugf("clearing supplemental groups") + if err = syscall.Setgroups([]int{}); err != nil { + fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err) + os.Exit(1) + } + } + } + + logrus.Debugf("setting gid") + if err = syscall.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil { + fmt.Fprintf(os.Stderr, "error setting GID: %v", err) + os.Exit(1) + } + + if err = setSeccomp(options.Spec); err != nil { + fmt.Fprintf(os.Stderr, "error setting seccomp filter for process: %v\n", err) + os.Exit(1) + } + + logrus.Debugf("setting capabilities") + var keepCaps []string + if user.UID != 0 { + keepCaps = []string{"CAP_SETUID"} + } + if err := setCapabilities(options.Spec, keepCaps...); err != nil { + fmt.Fprintf(os.Stderr, "error setting capabilities for process: %v\n", err) + os.Exit(1) + } + + logrus.Debugf("setting uid") + if err = syscall.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil { + fmt.Fprintf(os.Stderr, "error setting UID: %v", err) + os.Exit(1) + } + + // Actually run the specified command. + cmd := exec.Command(args[0], args[1:]...) + cmd.Env = options.Spec.Process.Env + cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr + cmd.Dir = cwd + logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH")) + if err = cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + if waitStatus.ExitStatus() != 0 { + fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus()) + } + os.Exit(waitStatus.ExitStatus()) + } else if waitStatus.Signaled() { + fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal()) + os.Exit(1) + } + } + } + fmt.Fprintf(os.Stderr, "process exited with error: %v", err) + os.Exit(1) + } +} + +// logNamespaceDiagnostics knows which namespaces we want to create. +// Output debug messages when that differs from what we're being asked to do. 
+func logNamespaceDiagnostics(spec *specs.Spec) { + sawMountNS := false + sawUTSNS := false + for _, ns := range spec.Linux.Namespaces { + switch ns.Type { + case specs.CgroupNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join cgroup namespace, sorry about that") + } else { + logrus.Debugf("unable to create cgroup namespace, sorry about that") + } + case specs.IPCNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join IPC namespace, sorry about that") + } else { + logrus.Debugf("unable to create IPC namespace, sorry about that") + } + case specs.MountNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join mount namespace %q, creating a new one", ns.Path) + } + sawMountNS = true + case specs.NetworkNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join network namespace, sorry about that") + } else { + logrus.Debugf("unable to create network namespace, sorry about that") + } + case specs.PIDNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join PID namespace, sorry about that") + } else { + logrus.Debugf("unable to create PID namespace, sorry about that") + } + case specs.UserNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join user namespace, sorry about that") + } + case specs.UTSNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join UTS namespace %q, creating a new one", ns.Path) + } + sawUTSNS = true + } + } + if !sawMountNS { + logrus.Debugf("mount namespace not requested, but creating a new one anyway") + } + if !sawUTSNS { + logrus.Debugf("UTS namespace not requested, but creating a new one anyway") + } +} + +// setApparmorProfile sets the apparmor profile for ourselves, and hopefully any child processes that we'll start. +func setApparmorProfile(spec *specs.Spec) error { + if !apparmor.IsEnabled() || spec.Process.ApparmorProfile == "" { + return nil + } + if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil { + return errors.Wrapf(err, "error setting apparmor profile to %q", spec.Process.ApparmorProfile) + } + return nil +} + +// setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. 
+func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
+	currentCaps, err := capability.NewPid2(0)
+	if err != nil {
+		return errors.Wrapf(err, "error reading capabilities of current process")
+	}
+	if err := currentCaps.Load(); err != nil {
+		return errors.Wrapf(err, "error loading capabilities")
+	}
+	caps, err := capability.NewPid2(0)
+	if err != nil {
+		return errors.Wrapf(err, "error reading capabilities of current process")
+	}
+	capMap := map[capability.CapType][]string{
+		capability.BOUNDING:    spec.Process.Capabilities.Bounding,
+		capability.EFFECTIVE:   spec.Process.Capabilities.Effective,
+		capability.INHERITABLE: []string{},
+		capability.PERMITTED:   spec.Process.Capabilities.Permitted,
+		capability.AMBIENT:     spec.Process.Capabilities.Ambient,
+	}
+	knownCaps := capability.List()
+	noCap := capability.Cap(-1)
+	for capType, capList := range capMap {
+		for _, capToSet := range capList {
+			cap := noCap
+			for _, c := range knownCaps {
+				if strings.EqualFold("CAP_"+c.String(), capToSet) {
+					cap = c
+					break
+				}
+			}
+			if cap == noCap {
+				return errors.Errorf("error mapping capability %q to a number", capToSet)
+			}
+			caps.Set(capType, cap)
+		}
+		for _, capToSet := range keepCaps {
+			cap := noCap
+			for _, c := range knownCaps {
+				if strings.EqualFold("CAP_"+c.String(), capToSet) {
+					cap = c
+					break
+				}
+			}
+			if cap == noCap {
+				return errors.Errorf("error mapping capability %q to a number", capToSet)
+			}
+			if currentCaps.Get(capType, cap) {
+				caps.Set(capType, cap)
+			}
+		}
+	}
+	if err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil {
+		return errors.Wrapf(err, "error setting capabilities")
+	}
+	return nil
+}
+
+// parseRlimits parses the resource limits for ourselves and any processes that
+// we'll start into a format that's more in line with the kernel APIs
+func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) {
+	if spec.Process == nil {
+		return nil, nil
+	}
+	parsed := make(map[int]unix.Rlimit)
+	for _, limit := range spec.Process.Rlimits {
+		resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)]
+		if !recognized {
+			return nil, errors.Errorf("error parsing limit type %q", limit.Type)
+		}
+		parsed[resource] = unix.Rlimit{Cur: limit.Soft, Max: limit.Hard}
+	}
+	return parsed, nil
+}
+
+// setRlimits sets any resource limits that we want to apply to processes that
+// we'll start.
+func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
+	limits, err := parseRlimits(spec)
+	if err != nil {
+		return err
+	}
+	for resource, desired := range limits {
+		var current unix.Rlimit
+		if err := unix.Getrlimit(resource, &current); err != nil {
+			return errors.Wrapf(err, "error reading %q limit", rlimitsReverseMap[resource])
+		}
+		if desired.Max > current.Max && onlyLower {
+			// this would raise a hard limit, and we're only here to lower them
+			continue
+		}
+		if desired.Max < current.Max && onlyRaise {
+			// this would lower a hard limit, and we're only here to raise them
+			continue
+		}
+		if err := unix.Setrlimit(resource, &desired); err != nil {
+			return errors.Wrapf(err, "error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d)", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max)
+		}
+	}
+	return nil
+}
+
+func makeReadOnly(mntpoint string, flags uintptr) error {
+	var fs unix.Statfs_t
+	// Make sure it's read-only.
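+	// A bind mount initially inherits its source's writability; MS_RDONLY only
+	// takes effect on the MS_REMOUNT below, so check the statfs flags first and
+	// remount only when the mount isn't read-only yet.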
+ if err := unix.Statfs(mntpoint, &fs); err != nil { + return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint) + } + if fs.Flags&unix.ST_RDONLY == 0 { + if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil { + return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint) + } + } + return nil +} + +func isDevNull(dev os.FileInfo) bool { + if dev.Mode()&os.ModeCharDevice != 0 { + stat, _ := dev.Sys().(*syscall.Stat_t) + nullStat := syscall.Stat_t{} + if err := syscall.Stat(os.DevNull, &nullStat); err != nil { + logrus.Warnf("unable to stat /dev/null: %v", err) + return false + } + if stat.Rdev == nullStat.Rdev { + return true + } + } + return false +} + +// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a +// callback that will clean up its work. +func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) { + var fs unix.Statfs_t + undoBinds = func() error { + if err2 := unix.Unmount(spec.Root.Path, unix.MNT_DETACH); err2 != nil { + retries := 0 + for (err2 == unix.EBUSY || err2 == unix.EAGAIN) && retries < 50 { + time.Sleep(50 * time.Millisecond) + err2 = unix.Unmount(spec.Root.Path, unix.MNT_DETACH) + retries++ + } + if err2 != nil { + logrus.Warnf("pkg/chroot: error unmounting %q (retried %d times): %v", spec.Root.Path, retries, err2) + if err == nil { + err = err2 + } + } + } + return err + } + + // Now bind mount all of those things to be under the rootfs's location in this + // mount namespace. + commonFlags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_PRIVATE) + bindFlags := commonFlags | unix.MS_NODEV + devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY + procFlags := devFlags | unix.MS_NODEV + sysFlags := devFlags | unix.MS_NODEV + + // Bind /dev read-only. + subDev := filepath.Join(spec.Root.Path, "/dev") + if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil { + if os.IsNotExist(err) { + err = os.Mkdir(subDev, 0755) + if err == nil { + err = unix.Mount("/dev", subDev, "bind", devFlags, "") + } + } + if err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting /dev from host into mount namespace") + } + } + // Make sure it's read-only. + if err = unix.Statfs(subDev, &fs); err != nil { + return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subDev) + } + if fs.Flags&unix.ST_RDONLY == 0 { + if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil { + return undoBinds, errors.Wrapf(err, "error remounting /dev in mount namespace read-only") + } + } + logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev")) + + // Bind /proc read-only. + subProc := filepath.Join(spec.Root.Path, "/proc") + if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil { + if os.IsNotExist(err) { + err = os.Mkdir(subProc, 0755) + if err == nil { + err = unix.Mount("/proc", subProc, "bind", procFlags, "") + } + } + if err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting /proc from host into mount namespace") + } + } + logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc")) + + // Bind /sys read-only. 
+ subSys := filepath.Join(spec.Root.Path, "/sys") + if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil { + if os.IsNotExist(err) { + err = os.Mkdir(subSys, 0755) + if err == nil { + err = unix.Mount("/sys", subSys, "bind", sysFlags, "") + } + } + if err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace") + } + } + if err := makeReadOnly(subSys, sysFlags); err != nil { + return undoBinds, err + } + + mnts, _ := mount.GetMounts() + for _, m := range mnts { + if !strings.HasPrefix(m.Mountpoint, "/sys/") && + m.Mountpoint != "/sys" { + continue + } + subSys := filepath.Join(spec.Root.Path, m.Mountpoint) + if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil { + msg := fmt.Sprintf("could not bind mount %q, skipping: %v", m.Mountpoint, err) + if strings.HasPrefix(m.Mountpoint, "/sys") { + logrus.Infof(msg) + } else { + logrus.Warningf(msg) + } + continue + } + if err := makeReadOnly(subSys, sysFlags); err != nil { + return undoBinds, err + } + } + logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys")) + + // Bind mount in everything we've been asked to mount. + for _, m := range spec.Mounts { + // Skip anything that we just mounted. + switch m.Destination { + case "/dev", "/proc", "/sys": + logrus.Debugf("already bind mounted %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination)) + continue + default: + if strings.HasPrefix(m.Destination, "/dev/") { + continue + } + if strings.HasPrefix(m.Destination, "/proc/") { + continue + } + if strings.HasPrefix(m.Destination, "/sys/") { + continue + } + } + // Skip anything that isn't a bind or tmpfs mount. + if m.Type != "bind" && m.Type != "tmpfs" && m.Type != "overlay" { + logrus.Debugf("skipping mount of type %q on %q", m.Type, m.Destination) + continue + } + // If the target is there, we can just mount it. + var srcinfo os.FileInfo + switch m.Type { + case "bind": + srcinfo, err = os.Stat(m.Source) + if err != nil { + return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source) + } + case "overlay": + fallthrough + case "tmpfs": + srcinfo, err = os.Stat("/") + if err != nil { + return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a %s", m.Type) + } + } + target := filepath.Join(spec.Root.Path, m.Destination) + // Check if target is a symlink + stat, err := os.Lstat(target) + // If target is a symlink, follow the link and ensure the destination exists + if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) { + target, err = copier.Eval(spec.Root.Path, m.Destination, copier.EvalOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "evaluating symlink %q", target) + } + // Stat the destination of the evaluated symlink + _, err = os.Stat(target) + } + if err != nil { + // If the target can't be stat()ted, check the error. + if !os.IsNotExist(err) { + return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", target) + } + // The target isn't there yet, so create it. 
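+			// srcinfo was stat()ed above: for bind mounts it describes the source, and
+			// for tmpfs/overlay mounts it describes "/", so directories get a directory
+			// mountpoint here and anything else gets an empty file.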
+			if srcinfo.IsDir() {
+				if err = os.MkdirAll(target, 0755); err != nil {
+					return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
+				}
+			} else {
+				if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+					return undoBinds, errors.Wrapf(err, "error ensuring parent of mountpoint %q (%q) is present in mount namespace", target, filepath.Dir(target))
+				}
+				var file *os.File
+				if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
+					return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
+				}
+				file.Close()
+			}
+		}
+		requestFlags := bindFlags
+		expectedFlags := uintptr(0)
+		for _, option := range m.Options {
+			switch option {
+			case "nodev":
+				requestFlags |= unix.MS_NODEV
+				expectedFlags |= unix.ST_NODEV
+			case "dev":
+				requestFlags &= ^uintptr(unix.MS_NODEV)
+				expectedFlags &= ^uintptr(unix.ST_NODEV)
+			case "noexec":
+				requestFlags |= unix.MS_NOEXEC
+				expectedFlags |= unix.ST_NOEXEC
+			case "exec":
+				requestFlags &= ^uintptr(unix.MS_NOEXEC)
+				expectedFlags &= ^uintptr(unix.ST_NOEXEC)
+			case "nosuid":
+				requestFlags |= unix.MS_NOSUID
+				expectedFlags |= unix.ST_NOSUID
+			case "suid":
+				requestFlags &= ^uintptr(unix.MS_NOSUID)
+				expectedFlags &= ^uintptr(unix.ST_NOSUID)
+			case "ro":
+				requestFlags |= unix.MS_RDONLY
+				expectedFlags |= unix.ST_RDONLY
+			case "rw":
+				requestFlags &= ^uintptr(unix.MS_RDONLY)
+				expectedFlags &= ^uintptr(unix.ST_RDONLY)
+			}
+		}
+		switch m.Type {
+		case "bind":
+			// Do the bind mount.
+			logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
+			if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil {
+				return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target)
+			}
+			logrus.Debugf("bind mounted %q to %q", m.Source, target)
+		case "tmpfs":
+			// Mount a tmpfs.
+			if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
+				return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
+			}
+			logrus.Debugf("mounted a tmpfs to %q", target)
+		case "overlay":
+			// Mount an overlay.
+			if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
+				return undoBinds, errors.Wrapf(err, "error mounting overlay to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
+			}
+			logrus.Debugf("mounted an overlay to %q", target)
+		}
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
+		}
+		if uintptr(fs.Flags)&expectedFlags != expectedFlags {
+			if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
+				return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
+			}
+		}
+	}
+
+	// Set up any read-only paths that we need to. If we're running inside
+	// of a container, some of these locations will already be read-only.
+	for _, roPath := range spec.Linux.ReadonlyPaths {
+		r := filepath.Join(spec.Root.Path, roPath)
+		target, err := filepath.EvalSymlinks(r)
+		if err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error checking %q for symlinks before marking it read-only", r)
+		}
+		// Check if the location is already read-only.
+		var fs unix.Statfs_t
+		if err = unix.Statfs(target, &fs); err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q is already read-only", target)
+		}
+		if fs.Flags&unix.ST_RDONLY != 0 {
+			continue
+		}
+		// Mount the location over itself, so that we can remount it as read-only.
+		roFlags := uintptr(unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY)
+		if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REC, ""); err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error bind mounting %q onto itself in preparation for making it read-only", target)
+		}
+		// Remount the location read-only.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
+		}
+		if fs.Flags&unix.ST_RDONLY == 0 {
+			if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil {
+				return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace read-only", target)
+			}
+		}
+		// Check again.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q was remounted read-only", target)
+		}
+		if fs.Flags&unix.ST_RDONLY == 0 {
+			// err is nil here, so report the failed verification directly.
+			return undoBinds, errors.Errorf("error verifying that %q in mount namespace was remounted read-only", target)
+		}
+	}
+
+	// Create an empty directory to use for masking directories.
+	roEmptyDir := filepath.Join(bundlePath, "empty")
+	if len(spec.Linux.MaskedPaths) > 0 {
+		if err := os.Mkdir(roEmptyDir, 0700); err != nil {
+			return undoBinds, errors.Wrapf(err, "error creating empty directory %q", roEmptyDir)
+		}
+	}
+
+	// Set up any masked paths that we need to. If we're running inside of
+	// a container, some of these locations will already be read-only tmpfs
+	// filesystems or bind mounted to os.DevNull. If we're not running
+	// inside of a container, and nobody else has done that, we'll do it.
+	for _, masked := range spec.Linux.MaskedPaths {
+		t := filepath.Join(spec.Root.Path, masked)
+		target, err := filepath.EvalSymlinks(t)
+		if err != nil {
+			target = t
+		}
+		// Get some info about the target.
+		targetinfo, err := os.Stat(target)
+		if err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", target)
+		}
+		if targetinfo.IsDir() {
+			// The target's a directory. Check if it's a read-only filesystem.
+			var statfs unix.Statfs_t
+			if err = unix.Statfs(target, &statfs); err != nil {
+				return undoBinds, errors.Wrapf(err, "error checking if directory %q is a mountpoint", target)
+			}
+			isReadOnly := statfs.Flags&unix.MS_RDONLY != 0
+			// Check if any of the IDs we're mapping could read it.
+			var stat unix.Stat_t
+			if err = unix.Stat(target, &stat); err != nil {
+				return undoBinds, errors.Wrapf(err, "error checking permissions on directory %q", target)
+			}
+			isAccessible := false
+			// World-readable or world-searchable?
+			if stat.Mode&(unix.S_IROTH|unix.S_IXOTH) != 0 {
+				isAccessible = true
+			}
+			// Readable by its group, if we're mapping that group?
+			if !isAccessible && stat.Mode&(unix.S_IRGRP|unix.S_IXGRP) != 0 {
+				if len(spec.Linux.GIDMappings) > 0 {
+					for _, mapping := range spec.Linux.GIDMappings {
+						if stat.Gid >= mapping.ContainerID && stat.Gid < mapping.ContainerID+mapping.Size {
+							isAccessible = true
+							break
+						}
+					}
+				}
+			}
+			// Readable by its owner, if we're mapping that user?
+			if !isAccessible && stat.Mode&(unix.S_IRUSR|unix.S_IXUSR) != 0 {
+				if len(spec.Linux.UIDMappings) > 0 {
+					for _, mapping := range spec.Linux.UIDMappings {
+						if stat.Uid >= mapping.ContainerID && stat.Uid < mapping.ContainerID+mapping.Size {
+							isAccessible = true
+							break
+						}
+					}
+				}
+			}
+			// Check if it's empty.
+			hasContent := false
+			directory, err := os.Open(target)
+			if err != nil {
+				if !os.IsPermission(err) {
+					return undoBinds, errors.Wrapf(err, "error opening directory %q", target)
+				}
+			} else {
+				names, err := directory.Readdirnames(0)
+				directory.Close()
+				if err != nil {
+					return undoBinds, errors.Wrapf(err, "error reading contents of directory %q", target)
+				}
+				hasContent = false
+				for _, name := range names {
+					switch name {
+					case ".", "..":
+						continue
+					default:
+						hasContent = true
+					}
+					if hasContent {
+						break
+					}
+				}
+			}
+			// The target's a directory, so read-only bind mount an empty directory on it.
+			roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
+			if !isReadOnly || (hasContent && isAccessible) {
+				if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
+					return undoBinds, errors.Wrapf(err, "error masking directory %q in mount namespace", target)
+				}
+				if err = unix.Statfs(target, &fs); err != nil {
+					return undoBinds, errors.Wrapf(err, "error checking if directory %q was mounted read-only in mount namespace", target)
+				}
+				if fs.Flags&unix.ST_RDONLY == 0 {
+					if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil {
+						return undoBinds, errors.Wrapf(err, "error making sure directory %q in mount namespace is read only", target)
+					}
+				}
+			}
+		} else {
+			// If the target is not a directory or os.DevNull, bind mount os.DevNull over it.
+			if !isDevNull(targetinfo) {
+				if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil {
+					return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target)
+				}
+			}
+		}
+	}
+	return undoBinds, nil
+}
diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go
new file mode 100644
index 00000000000..f130f7a22fd
--- /dev/null
+++ b/vendor/github.com/containers/buildah/chroot/seccomp.go
@@ -0,0 +1,200 @@
+// +build linux,seccomp
+
+package chroot
+
+import (
+	"io/ioutil"
+
+	"github.com/containers/common/pkg/seccomp"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	libseccomp "github.com/seccomp/libseccomp-golang"
+	"github.com/sirupsen/logrus"
+)
+
+// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start.
+func setSeccomp(spec *specs.Spec) error { + logrus.Debugf("setting seccomp configuration") + if spec.Linux.Seccomp == nil { + return nil + } + mapAction := func(specAction specs.LinuxSeccompAction, errnoRet *uint) libseccomp.ScmpAction { + switch specAction { + case specs.ActKill: + return libseccomp.ActKill + case specs.ActTrap: + return libseccomp.ActTrap + case specs.ActErrno: + action := libseccomp.ActErrno + if errnoRet != nil { + action = action.SetReturnCode(int16(*errnoRet)) + } + return action + case specs.ActTrace: + return libseccomp.ActTrace + case specs.ActAllow: + return libseccomp.ActAllow + case specs.ActLog: + return libseccomp.ActLog + case specs.ActKillProcess: + return libseccomp.ActKillProcess + default: + logrus.Errorf("unmappable action %v", specAction) + } + return libseccomp.ActInvalid + } + mapArch := func(specArch specs.Arch) libseccomp.ScmpArch { + switch specArch { + case specs.ArchX86: + return libseccomp.ArchX86 + case specs.ArchX86_64: + return libseccomp.ArchAMD64 + case specs.ArchX32: + return libseccomp.ArchX32 + case specs.ArchARM: + return libseccomp.ArchARM + case specs.ArchAARCH64: + return libseccomp.ArchARM64 + case specs.ArchMIPS: + return libseccomp.ArchMIPS + case specs.ArchMIPS64: + return libseccomp.ArchMIPS64 + case specs.ArchMIPS64N32: + return libseccomp.ArchMIPS64N32 + case specs.ArchMIPSEL: + return libseccomp.ArchMIPSEL + case specs.ArchMIPSEL64: + return libseccomp.ArchMIPSEL64 + case specs.ArchMIPSEL64N32: + return libseccomp.ArchMIPSEL64N32 + case specs.ArchPPC: + return libseccomp.ArchPPC + case specs.ArchPPC64: + return libseccomp.ArchPPC64 + case specs.ArchPPC64LE: + return libseccomp.ArchPPC64LE + case specs.ArchS390: + return libseccomp.ArchS390 + case specs.ArchS390X: + return libseccomp.ArchS390X + case specs.ArchPARISC: + /* fallthrough */ /* for now */ + case specs.ArchPARISC64: + /* fallthrough */ /* for now */ + default: + logrus.Errorf("unmappable arch %v", specArch) + } + return libseccomp.ArchInvalid + } + mapOp := func(op specs.LinuxSeccompOperator) libseccomp.ScmpCompareOp { + switch op { + case specs.OpNotEqual: + return libseccomp.CompareNotEqual + case specs.OpLessThan: + return libseccomp.CompareLess + case specs.OpLessEqual: + return libseccomp.CompareLessOrEqual + case specs.OpEqualTo: + return libseccomp.CompareEqual + case specs.OpGreaterEqual: + return libseccomp.CompareGreaterEqual + case specs.OpGreaterThan: + return libseccomp.CompareGreater + case specs.OpMaskedEqual: + return libseccomp.CompareMaskedEqual + default: + logrus.Errorf("unmappable op %v", op) + } + return libseccomp.CompareInvalid + } + + filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, nil)) + if err != nil { + return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction) + } + for _, arch := range spec.Linux.Seccomp.Architectures { + if err = filter.AddArch(mapArch(arch)); err != nil { + return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", arch, mapArch(arch)) + } + } + for _, rule := range spec.Linux.Seccomp.Syscalls { + scnames := make(map[libseccomp.ScmpSyscall]string) + for _, name := range rule.Names { + scnum, err := libseccomp.GetSyscallFromName(name) + if err != nil { + logrus.Debugf("error mapping syscall %q to a syscall, ignoring %q rule for %q", name, rule.Action, name) + continue + } + scnames[scnum] = name + } + for scnum := range scnames { + if len(rule.Args) == 0 { + if err = filter.AddRule(scnum, 
mapAction(rule.Action, rule.ErrnoRet)); err != nil { + return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) + } + continue + } + var conditions []libseccomp.ScmpCondition + opsAreAllEquality := true + for _, arg := range rule.Args { + condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo) + if err != nil { + return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo) + } + if arg.Op != specs.OpEqualTo { + opsAreAllEquality = false + } + conditions = append(conditions, condition) + } + if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions); err != nil { + // Okay, if the rules specify multiple equality + // checks, assume someone thought that they + // were OR'd, when in fact they're ordinarily + // supposed to be AND'd. Break them up into + // different rules to get that OR effect. + if len(rule.Args) > 1 && opsAreAllEquality && err.Error() == "two checks on same syscall argument" { + for i := range conditions { + if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions[i:i+1]); err != nil { + return errors.Wrapf(err, "error adding a conditional rule (%q:%q[%d]) to seccomp filter", scnames[scnum], rule.Action, i) + } + } + } else { + return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) + } + } + } + } + if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil { + return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges) + } + err = filter.Load() + filter.Release() + if err != nil { + return errors.Wrapf(err, "error activating seccomp filter") + } + return nil +} + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + switch seccompProfilePath { + case "unconfined": + spec.Linux.Seccomp = nil + case "": + seccompConfig, err := seccomp.GetDefaultProfile(spec) + if err != nil { + return errors.Wrapf(err, "loading default seccomp profile failed") + } + spec.Linux.Seccomp = seccompConfig + default: + seccompProfile, err := ioutil.ReadFile(seccompProfilePath) + if err != nil { + return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + } + seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) + if err != nil { + return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + } + spec.Linux.Seccomp = seccompConfig + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go new file mode 100644 index 00000000000..f33dd254a35 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -0,0 +1,23 @@ +// +build !linux !seccomp + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func setSeccomp(spec *specs.Spec) error { + if spec.Linux.Seccomp != nil { + return errors.New("configured a seccomp filter without seccomp support?") + } + return nil +} + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + if spec.Linux != nil { + // runtime-tools may have supplied us with a default filter + spec.Linux.Seccomp = nil + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go 
b/vendor/github.com/containers/buildah/chroot/selinux.go new file mode 100644 index 00000000000..ef96a0e7a26 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/selinux.go @@ -0,0 +1,22 @@ +// +build linux + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// setSelinuxLabel sets the process label for child processes that we'll start. +func setSelinuxLabel(spec *specs.Spec) error { + logrus.Debugf("setting selinux label") + if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() { + if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil { + return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go new file mode 100644 index 00000000000..41d2b86be79 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func setSelinuxLabel(spec *specs.Spec) error { + if spec.Linux.MountLabel != "" { + return errors.New("configured an SELinux mount label without SELinux support?") + } + if spec.Process.SelinuxLabel != "" { + return errors.New("configured an SELinux process label without SELinux support?") + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/unsupported.go b/vendor/github.com/containers/buildah/chroot/unsupported.go new file mode 100644 index 00000000000..5312c00241c --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package chroot + +import ( + "io" + + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// RunUsingChroot is not supported. 
+func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) { + return errors.Errorf("--isolation chroot is not supported on this platform") +} diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go new file mode 100644 index 00000000000..25c30071611 --- /dev/null +++ b/vendor/github.com/containers/buildah/commit.go @@ -0,0 +1,416 @@ +package buildah + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "strings" + "time" + + "github.com/containers/buildah/pkg/blobcache" + "github.com/containers/buildah/util" + "github.com/containers/common/libimage" + "github.com/containers/common/libimage/manifests" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/stringid" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // BuilderIdentityAnnotation is the name of the annotation key containing + // the name and version of the producer of the image stored as an + // annotation on commit. + BuilderIdentityAnnotation = "io.buildah.version" +) + +// CommitOptions can be used to alter how an image is committed. +type CommitOptions struct { + // PreferredManifestType is the preferred type of image manifest. The + // image configuration format will be of a compatible type. + PreferredManifestType string + // Compression specifies the type of compression which is applied to + // layer blobs. The default is to not use compression, but + // archive.Gzip is recommended. + Compression archive.Compression + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // AdditionalTags is a list of additional names to add to the image, if + // the transport to which we're writing the image gives us a way to add + // them. + AdditionalTags []string + // ReportWriter is an io.Writer which will be used to log the writing + // of the new image. + ReportWriter io.Writer + // HistoryTimestamp is the timestamp used when creating new items in the + // image's history. If unset, the current time will be used. + HistoryTimestamp *time.Time + // github.com/containers/image/types SystemContext to hold credentials + // and other authentication/authorization information. + SystemContext *types.SystemContext + // IIDFile tells the builder to write the image ID to the specified file + IIDFile string + // Squash tells the builder to produce an image with a single layer + // instead of with possibly more than one layer. + Squash bool + // BlobDirectory is the name of a directory in which we'll look for + // prebuilt copies of layer blobs that we might otherwise need to + // regenerate from on-disk layers. 
If blobs are available, the
+	// manifest of the new image will reference the blobs rather than
+	// on-disk layers.
+	BlobDirectory string
+	// EmptyLayer tells the builder to omit the diff for the working
+	// container.
+	EmptyLayer bool
+	// OmitTimestamp forces epoch 0 as created timestamp to allow for
+	// deterministic, content-addressable builds.
+	// Deprecated: use HistoryTimestamp instead.
+	OmitTimestamp bool
+	// SignBy is the fingerprint of a GPG key to use for signing the image.
+	SignBy string
+	// Manifest is the name of the manifest list to add the image to.
+	Manifest string
+	// MaxRetries is the maximum number of attempts we'll make to commit
+	// the image to an external registry if the first attempt fails.
+	MaxRetries int
+	// RetryDelay is how long to wait before retrying a commit attempt to a
+	// registry.
+	RetryDelay time.Duration
+	// OciEncryptConfig when non-nil indicates that an image should be encrypted.
+	// The encryption options are derived from the construction of the EncryptConfig object.
+	OciEncryptConfig *encconfig.EncryptConfig
+	// OciEncryptLayers represents the list of layers to encrypt.
+	// If nil, don't encrypt any layers.
+	// If non-nil and len==0, encrypt all layers.
+	// Integers in the slice represent 0-indexed layer indices, with support for negative
+	// indexing; i.e. 0 is the first layer, -1 is the last (top-most) layer.
+	OciEncryptLayers *[]int
+	// UnsetEnvs is a list of environment variables to not add to the final image.
+	UnsetEnvs []string
+}
+
+var (
+	// storageAllowedPolicyScopes overrides the policy for local storage
+	// to ensure that we can read images from it.
+	storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+		"": []signature.PolicyRequirement{
+			signature.NewPRInsecureAcceptAnything(),
+		},
+	}
+)
+
+// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
+// variable, if it's set. The contents are expected to be a JSON-encoded
+// github.com/openshift/api/config/v1.Image, set by an OpenShift build
+// controller that arranged for us to be run in a container.
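+// The returned bool reports whether the destination registry is listed as
+// insecure, in which case the caller skips TLS verification; blocked or
+// disallowed registries are reported as errors instead.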
+func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (insecure bool, err error) { + transport := dest.Transport() + if transport == nil { + return false, nil + } + if transport.Name() != docker.Transport.Name() { + return false, nil + } + dref := dest.DockerReference() + if dref == nil || reference.Domain(dref) == "" { + return false, nil + } + + if registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES"); ok && len(registrySources) > 0 { + // Use local struct instead of github.com/openshift/api/config/v1 RegistrySources + var sources struct { + InsecureRegistries []string `json:"insecureRegistries,omitempty"` + BlockedRegistries []string `json:"blockedRegistries,omitempty"` + AllowedRegistries []string `json:"allowedRegistries,omitempty"` + } + if err := json.Unmarshal([]byte(registrySources), &sources); err != nil { + return false, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources) + } + blocked := false + if len(sources.BlockedRegistries) > 0 { + for _, blockedDomain := range sources.BlockedRegistries { + if blockedDomain == reference.Domain(dref) { + blocked = true + } + } + } + if blocked { + return false, errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref)) + } + allowed := true + if len(sources.AllowedRegistries) > 0 { + allowed = false + for _, allowedDomain := range sources.AllowedRegistries { + if allowedDomain == reference.Domain(dref) { + allowed = true + } + } + } + if !allowed { + return false, errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref)) + } + if len(sources.InsecureRegistries) > 0 { + return true, nil + } + } + return false, nil +} + +func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpec string) (string, error) { + var create bool + systemContext := &types.SystemContext{} + var list manifests.List + runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: systemContext}) + if err != nil { + return "", err + } + manifestList, err := runtime.LookupManifestList(manifestName) + if err != nil { + create = true + list = manifests.Create() + } else { + locker, err := manifests.LockerForImage(b.store, manifestList.ID()) + if err != nil { + return "", err + } + locker.Lock() + defer locker.Unlock() + _, list, err = manifests.LoadFromImage(b.store, manifestList.ID()) + if err != nil { + return "", err + } + } + + names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store) + if err != nil { + return "", errors.Wrapf(err, "error encountered while expanding manifest list name %q", manifestName) + } + + ref, err := util.VerifyTagName(imageSpec) + if err != nil { + // check if the local image exists + if ref, _, err = util.FindImage(b.store, "", systemContext, imageSpec); err != nil { + return "", err + } + } + + if _, err = list.Add(ctx, systemContext, ref, true); err != nil { + return "", err + } + var imageID string + if create { + imageID, err = list.SaveToImage(b.store, "", names, manifest.DockerV2ListMediaType) + } else { + imageID, err = list.SaveToImage(b.store, manifestList.ID(), nil, "") + } + return imageID, err +} + +// Commit writes the contents of the container, along with its updated +// configuration, to a new image in the specified location, and if we know how, +// add any additional tags that were specified. 
Returns the ID of the new image
+// if commit was successful and the image destination was local.
+func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
+	var (
+		imgID string
+	)
+
+	// If we weren't given a name, build a destination reference using a
+	// temporary name that we'll remove later. The correct thing to do
+	// would be to read the manifest and configuration blob, and ask the
+	// manifest for the ID that we'd give the image, but that computation
+	// requires that we know the digests of the layer blobs, which we don't
+	// want to compute here because we'll have to do it again when
+	// cp.Image() instantiates a source image, and we don't want to do the
+	// work twice.
+	if options.OmitTimestamp {
+		if options.HistoryTimestamp != nil {
+			return imgID, nil, "", errors.Errorf("OmitTimestamp and HistoryTimestamp cannot be used together")
+		}
+		timestamp := time.Unix(0, 0).UTC()
+		options.HistoryTimestamp = &timestamp
+	}
+	nameToRemove := ""
+	if dest == nil {
+		nameToRemove = stringid.GenerateRandomID() + "-tmp"
+		dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove)
+		if err != nil {
+			return imgID, nil, "", errors.Wrapf(err, "error creating temporary destination reference for image")
+		}
+		dest = dest2
+	}
+
+	systemContext := getSystemContext(b.store, options.SystemContext, options.SignaturePolicyPath)
+
+	blocked, err := isReferenceBlocked(dest, systemContext)
+	if err != nil {
+		return "", nil, "", errors.Wrapf(err, "error checking if committing to registry for %q is blocked", transports.ImageName(dest))
+	}
+	if blocked {
+		return "", nil, "", errors.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
+	}
+
+	// Load the system signing policy.
+	commitPolicy, err := signature.DefaultPolicy(systemContext)
+	if err != nil {
+		return "", nil, "", errors.Wrapf(err, "error obtaining default signature policy")
+	}
+	// Override the settings for local storage to make sure that we can always read the source "image".
+	commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
+
+	policyContext, err := signature.NewPolicyContext(commitPolicy)
+	if err != nil {
+		return imgID, nil, "", errors.Wrapf(err, "error creating new signature policy context")
+	}
+	defer func() {
+		if err2 := policyContext.Destroy(); err2 != nil {
+			logrus.Debugf("error destroying signature policy context: %v", err2)
+		}
+	}()
+
+	// Check if the commit is blocked by $BUILD_REGISTRY_SOURCES.
+	insecure, err := checkRegistrySourcesAllows("commit to", dest)
+	if err != nil {
+		return imgID, nil, "", err
+	}
+	if insecure {
+		if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+			return imgID, nil, "", errors.Errorf("can't require TLS verification on an insecure registry")
+		}
+		systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+		systemContext.OCIInsecureSkipTLSVerify = true
+		systemContext.DockerDaemonInsecureSkipTLSVerify = true
+	}
+	logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
+
+	// Build an image reference from which we can copy the finished image.
+	src, err := b.makeImageRef(options)
+	if err != nil {
+		return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
+	}
+	// In case we're using caching, decide how to handle compression for a cache.
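+	// The blob cache wraps both the source and the destination reference, so
+	// layer blobs written during this commit can be reused later (for example
+	// by a push that uses the same blob directory) instead of being
+	// regenerated from on-disk layers.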
+ // If we're using blob caching, set it up for the source. + maybeCachedSrc := src + maybeCachedDest := dest + if options.BlobDirectory != "" { + compress := types.PreserveOriginal + if options.Compression != archive.Uncompressed { + compress = types.Compress + } + cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory) + } + maybeCachedSrc = cache + cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory) + } + maybeCachedDest = cache + } + // "Copy" our image to where it needs to be. + switch options.Compression { + case archive.Uncompressed: + systemContext.OCIAcceptUncompressedLayers = true + case archive.Gzip: + systemContext.DirForceCompress = true + } + + if systemContext.ArchitectureChoice != b.Architecture() { + systemContext.ArchitectureChoice = b.Architecture() + } + if systemContext.OSChoice != b.OS() { + systemContext.OSChoice = b.OS() + } + + var manifestBytes []byte + if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil { + return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID) + } + // If we've got more names to attach, and we know how to do that for + // the transport that we're writing the new image to, add them now. 
+ if len(options.AdditionalTags) > 0 { + switch dest.Transport().Name() { + case is.Transport.Name(): + img, err := is.Transport.GetStoreImage(b.store, dest) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + } + if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil { + return imgID, nil, "", errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...)) + } + logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) + default: + logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name()) + } + } + + img, err := is.Transport.GetStoreImage(b.store, dest) + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest)) + } + if err == nil { + imgID = img.ID + prunedNames := make([]string, 0, len(img.Names)) + for _, name := range img.Names { + if !(nameToRemove != "" && strings.Contains(name, nameToRemove)) { + prunedNames = append(prunedNames, name) + } + } + if len(prunedNames) < len(img.Names) { + if err = b.store.SetNames(imgID, prunedNames); err != nil { + return imgID, nil, "", errors.Wrapf(err, "failed to prune temporary name from image %q", imgID) + } + logrus.Debugf("reassigned names %v to image %q", prunedNames, img.ID) + dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error creating unnamed destination reference for image") + } + dest = dest2 + } + if options.IIDFile != "" { + if err = ioutil.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil { + return imgID, nil, "", err + } + } + } + + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest)) + } + + var ref reference.Canonical + if name := dest.DockerReference(); name != nil { + ref, err = reference.WithDigest(name, manifestDigest) + if err != nil { + logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err) + } + } + + if options.Manifest != "" { + manifestID, err := b.addManifest(ctx, options.Manifest, imgID) + if err != nil { + return imgID, nil, "", err + } + logrus.Debugf("added imgID %s to manifestID %s", imgID, manifestID) + + } + return imgID, ref, manifestDigest, nil +} diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go new file mode 100644 index 00000000000..ea28be1846f --- /dev/null +++ b/vendor/github.com/containers/buildah/common.go @@ -0,0 +1,88 @@ +package buildah + +import ( + "context" + "io" + "os" + "path/filepath" + "time" + + "github.com/containers/buildah/define" + "github.com/containers/common/pkg/retry" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/unshare" +) + +const ( + // OCI used to define the "oci" image format + OCI = define.OCI + // DOCKER used to define the "docker" image format + DOCKER = define.DOCKER +) + +func getCopyOptions(store 
storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig) *cp.Options { + sourceCtx := getSystemContext(store, nil, "") + if sourceSystemContext != nil { + *sourceCtx = *sourceSystemContext + } + + destinationCtx := getSystemContext(store, nil, "") + if destinationSystemContext != nil { + *destinationCtx = *destinationSystemContext + } + return &cp.Options{ + ReportWriter: reportWriter, + SourceCtx: sourceCtx, + DestinationCtx: destinationCtx, + ForceManifestMIMEType: manifestType, + RemoveSignatures: removeSignatures, + SignBy: addSigner, + OciEncryptConfig: ociEncryptConfig, + OciDecryptConfig: ociDecryptConfig, + OciEncryptLayers: ociEncryptLayers, + } +} + +func getSystemContext(store storage.Store, defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext { + sc := &types.SystemContext{} + if defaults != nil { + *sc = *defaults + } + if signaturePolicyPath != "" { + sc.SignaturePolicyPath = signaturePolicyPath + } + if store != nil { + if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() { + userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf") + if _, err := os.Stat(userRegistriesFile); err == nil { + sc.SystemRegistriesConfPath = userRegistriesFile + } + } + } + return sc +} + +func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) { + var ( + manifestBytes []byte + err error + lastErr error + ) + err = retry.RetryIfNecessary(ctx, func() error { + manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions) + if registry != nil && registry.Transport().Name() != docker.Transport.Name() { + lastErr = err + return nil + } + return err + }, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay}) + if lastErr != nil { + err = lastErr + } + return manifestBytes, err +} diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go new file mode 100644 index 00000000000..effaa81e4d0 --- /dev/null +++ b/vendor/github.com/containers/buildah/config.go @@ -0,0 +1,680 @@ +package buildah + +import ( + "context" + "encoding/json" + "os" + "runtime" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/containers/buildah/define" + "github.com/containers/buildah/docker" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/stringid" + ociv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format +// (either as it exists, or converting the image if necessary), and unmarshals it into dest. +// NOTE: The MIME type is of the _manifest_, not of the _config_ that is returned. 
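+// (The conversion below happens in two UpdatedImage steps: layer compression
+// metadata is first normalized to gzip, which all supported manifest formats
+// can express, and only then is the manifest MIME type actually switched.)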
+func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error { + _, actualManifestMIMEType, err := img.Manifest(ctx) + if err != nil { + return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference())) + } + if wantedManifestMIMEType != actualManifestMIMEType { + layerInfos := img.LayerInfos() + for i := range layerInfos { // force the "compression" to gzip, which is supported by all of the formats we care about + layerInfos[i].CompressionOperation = types.Compress + layerInfos[i].CompressionAlgorithm = &compression.Gzip + } + updatedImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{ + LayerInfos: layerInfos, + }) + if err != nil { + return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference())) + } + secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{ + ManifestMIMEType: wantedManifestMIMEType, + }) + if err != nil { + return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType) + } + img = secondUpdatedImg + } + config, err := img.ConfigBlob(ctx) + if err != nil { + return errors.Wrapf(err, "error reading %s config from %q", wantedManifestMIMEType, transports.ImageName(img.Reference())) + } + if err := json.Unmarshal(config, dest); err != nil { + return errors.Wrapf(err, "error parsing %s configuration %q from %q", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference())) + } + return nil +} + +func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.SystemContext) error { + if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one. + rawManifest, manifestMIMEType, err := img.Manifest(ctx) + if err != nil { + return errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(img.Reference())) + } + rawConfig, err := img.ConfigBlob(ctx) + if err != nil { + return errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(img.Reference())) + } + b.Manifest = rawManifest + b.Config = rawConfig + + dimage := docker.V2Image{} + if err := unmarshalConvertedConfig(ctx, &dimage, img, manifest.DockerV2Schema2MediaType); err != nil { + return err + } + b.Docker = dimage + + oimage := ociv1.Image{} + if err := unmarshalConvertedConfig(ctx, &oimage, img, ociv1.MediaTypeImageManifest); err != nil { + return err + } + b.OCIv1 = oimage + + if manifestMIMEType == ociv1.MediaTypeImageManifest { + // Attempt to recover format-specific data from the manifest. + v1Manifest := ociv1.Manifest{} + if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil { + return errors.Wrapf(err, "error parsing OCI manifest %q", string(b.Manifest)) + } + for k, v := range v1Manifest.Annotations { + // NOTE: do not override annotations that are + // already set. Otherwise, we may erase + // annotations such as the digest of the base + // image. + if value := b.ImageAnnotations[k]; value == "" { + b.ImageAnnotations[k] = v + } + } + } + } + + b.setupLogger() + b.fixupConfig(sys) + return nil +} + +func (b *Builder) fixupConfig(sys *types.SystemContext) { + if b.Docker.Config != nil { + // Prefer image-level settings over those from the container it was built from. 
+ b.Docker.ContainerConfig = *b.Docker.Config + } + b.Docker.Config = &b.Docker.ContainerConfig + b.Docker.DockerVersion = "" + now := time.Now().UTC() + if b.Docker.Created.IsZero() { + b.Docker.Created = now + } + if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() { + b.OCIv1.Created = &now + } + if b.OS() == "" { + if sys != nil && sys.OSChoice != "" { + b.SetOS(sys.OSChoice) + } else { + b.SetOS(runtime.GOOS) + } + } + if b.Architecture() == "" { + if sys != nil && sys.ArchitectureChoice != "" { + b.SetArchitecture(sys.ArchitectureChoice) + } else { + b.SetArchitecture(runtime.GOARCH) + } + // in case the arch string we started with was shorthand for a known arch+variant pair, normalize it + ps := platforms.Normalize(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()}) + b.SetArchitecture(ps.Architecture) + b.SetVariant(ps.Variant) + } + if b.Format == define.Dockerv2ImageManifest && b.Hostname() == "" { + b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID())) + } +} + +func (b *Builder) setupLogger() { + if b.Logger == nil { + b.Logger = logrus.New() + b.Logger.SetOutput(os.Stderr) + b.Logger.SetLevel(logrus.GetLevel()) + } +} + +// Annotations returns a set of key-value pairs from the image's manifest. +func (b *Builder) Annotations() map[string]string { + return copyStringStringMap(b.ImageAnnotations) +} + +// SetAnnotation adds or overwrites a key's value from the image's manifest. +// Note: this setting is not present in the Docker v2 image format, so it is +// discarded when writing images using Docker v2 formats. +func (b *Builder) SetAnnotation(key, value string) { + if b.ImageAnnotations == nil { + b.ImageAnnotations = map[string]string{} + } + b.ImageAnnotations[key] = value +} + +// UnsetAnnotation removes a key and its value from the image's manifest, if +// it's present. +func (b *Builder) UnsetAnnotation(key string) { + delete(b.ImageAnnotations, key) +} + +// ClearAnnotations removes all keys and their values from the image's +// manifest. +func (b *Builder) ClearAnnotations() { + b.ImageAnnotations = map[string]string{} +} + +// CreatedBy returns a description of how this image was built. +func (b *Builder) CreatedBy() string { + return b.ImageCreatedBy +} + +// SetCreatedBy sets the description of how this image was built. +func (b *Builder) SetCreatedBy(how string) { + b.ImageCreatedBy = how +} + +// OS returns a name of the OS on which the container, or a container built +// using an image built from this container, is intended to be run. +func (b *Builder) OS() string { + return b.OCIv1.OS +} + +// SetOS sets the name of the OS on which the container, or a container built +// using an image built from this container, is intended to be run. +func (b *Builder) SetOS(os string) { + b.OCIv1.OS = os + b.Docker.OS = os +} + +// Architecture returns a name of the architecture on which the container, or a +// container built using an image built from this container, is intended to be +// run. +func (b *Builder) Architecture() string { + return b.OCIv1.Architecture +} + +// SetArchitecture sets the name of the architecture on which the container, or +// a container built using an image built from this container, is intended to +// be run. +func (b *Builder) SetArchitecture(arch string) { + b.OCIv1.Architecture = arch + b.Docker.Architecture = arch +} + +// Variant returns a name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. 
+func (b *Builder) Variant() string { + return b.OCIv1.Variant +} + +// SetVariant sets the name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. +func (b *Builder) SetVariant(variant string) { + b.Docker.Variant = variant + b.OCIv1.Variant = variant +} + +// Maintainer returns contact information for the person who built the image. +func (b *Builder) Maintainer() string { + return b.OCIv1.Author +} + +// SetMaintainer sets contact information for the person who built the image. +func (b *Builder) SetMaintainer(who string) { + b.OCIv1.Author = who + b.Docker.Author = who +} + +// User returns information about the user as whom the container, or a +// container built using an image built from this container, should be run. +func (b *Builder) User() string { + return b.OCIv1.Config.User +} + +// SetUser sets information about the user as whom the container, or a +// container built using an image built from this container, should be run. +// Acceptable forms are a user name or ID, optionally followed by a colon and a +// group name or ID. +func (b *Builder) SetUser(spec string) { + b.OCIv1.Config.User = spec + b.Docker.Config.User = spec +} + +// OnBuild returns the OnBuild value from the container. +func (b *Builder) OnBuild() []string { + return copyStringSlice(b.Docker.Config.OnBuild) +} + +// ClearOnBuild removes all values from the OnBuild structure +func (b *Builder) ClearOnBuild() { + b.Docker.Config.OnBuild = []string{} +} + +// SetOnBuild sets a trigger instruction to be executed when the image is used +// as the base of another image. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetOnBuild(onBuild string) { + if onBuild != "" && b.Format != define.Dockerv2ImageManifest { + b.Logger.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) + } + b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild) +} + +// WorkDir returns the default working directory for running commands in the +// container, or in a container built using an image built from this container. +func (b *Builder) WorkDir() string { + return b.OCIv1.Config.WorkingDir +} + +// SetWorkDir sets the location of the default working directory for running +// commands in the container, or in a container built using an image built from +// this container. +func (b *Builder) SetWorkDir(there string) { + b.OCIv1.Config.WorkingDir = there + b.Docker.Config.WorkingDir = there +} + +// Shell returns the default shell for running commands in the +// container, or in a container built using an image built from this container. +func (b *Builder) Shell() []string { + return copyStringSlice(b.Docker.Config.Shell) +} + +// SetShell sets the default shell for running +// commands in the container, or in a container built using an image built from +// this container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetShell(shell []string) { + if len(shell) > 0 && b.Format != define.Dockerv2ImageManifest { + b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. 
Must use `docker` format", shell)
+ }
+
+ b.Docker.Config.Shell = copyStringSlice(shell)
+}
+
+// Env returns a list of key-value pairs to be set when running commands in the
+// container, or in a container built using an image built from this container.
+func (b *Builder) Env() []string {
+ return copyStringSlice(b.OCIv1.Config.Env)
+}
+
+// SetEnv adds or overwrites a value to the set of environment strings which
+// should be set when running commands in the container, or in a container
+// built using an image built from this container.
+func (b *Builder) SetEnv(k string, v string) {
+ reset := func(s *[]string) {
+ n := []string{}
+ for i := range *s {
+ if !strings.HasPrefix((*s)[i], k+"=") {
+ n = append(n, (*s)[i])
+ }
+ }
+ n = append(n, k+"="+v)
+ *s = n
+ }
+ reset(&b.OCIv1.Config.Env)
+ reset(&b.Docker.Config.Env)
+}
+
+// UnsetEnv removes a value from the set of environment strings which should be
+// set when running commands in this container, or in a container built using
+// an image built from this container.
+func (b *Builder) UnsetEnv(k string) {
+ unset := func(s *[]string) {
+ n := []string{}
+ for i := range *s {
+ if !strings.HasPrefix((*s)[i], k+"=") {
+ n = append(n, (*s)[i])
+ }
+ }
+ *s = n
+ }
+ unset(&b.OCIv1.Config.Env)
+ unset(&b.Docker.Config.Env)
+}
+
+// ClearEnv removes all values from the set of environment strings which should
+// be set when running commands in this container, or in a container built
+// using an image built from this container.
+func (b *Builder) ClearEnv() {
+ b.OCIv1.Config.Env = []string{}
+ b.Docker.Config.Env = []string{}
+}
+
+// Cmd returns the default command, or command parameters if an Entrypoint is
+// set, to use when running a container built from an image built from this
+// container.
+func (b *Builder) Cmd() []string {
+ return copyStringSlice(b.OCIv1.Config.Cmd)
+}
+
+// SetCmd sets the default command, or command parameters if an Entrypoint is
+// set, to use when running a container built from an image built from this
+// container.
+func (b *Builder) SetCmd(cmd []string) {
+ b.OCIv1.Config.Cmd = copyStringSlice(cmd)
+ b.Docker.Config.Cmd = copyStringSlice(cmd)
+}
+
+// Entrypoint returns the command to be run for containers built from images
+// built from this container.
+func (b *Builder) Entrypoint() []string {
+ if len(b.OCIv1.Config.Entrypoint) > 0 {
+ return copyStringSlice(b.OCIv1.Config.Entrypoint)
+ }
+ return nil
+}
+
+// SetEntrypoint sets the command to be run in containers built from images
+// built from this container.
+func (b *Builder) SetEntrypoint(ep []string) {
+ b.OCIv1.Config.Entrypoint = copyStringSlice(ep)
+ b.Docker.Config.Entrypoint = copyStringSlice(ep)
+}
+
+// Labels returns a set of key-value pairs from the image's runtime
+// configuration.
+func (b *Builder) Labels() map[string]string {
+ return copyStringStringMap(b.OCIv1.Config.Labels)
+}
+
+// SetLabel adds or overwrites a key's value from the image's runtime
+// configuration.
+func (b *Builder) SetLabel(k string, v string) {
+ if b.OCIv1.Config.Labels == nil {
+ b.OCIv1.Config.Labels = map[string]string{}
+ }
+ b.OCIv1.Config.Labels[k] = v
+ if b.Docker.Config.Labels == nil {
+ b.Docker.Config.Labels = map[string]string{}
+ }
+ b.Docker.Config.Labels[k] = v
+}
+
+// UnsetLabel removes a key and its value from the image's runtime
+// configuration, if it's present. 
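(The env/cmd/entrypoint/label setters above keep the OCI and Docker views of the configuration in sync, which is why each writes to both b.OCIv1.Config and b.Docker.Config. A hypothetical usage sketch, not part of this patch; the helper name and values are illustrative, and b is assumed to come from buildah.NewBuilder:

package example

import "github.com/containers/buildah"

// applyRuntimeConfig shows the Dockerfile-equivalent calls:
// ENV, WORKDIR, ENTRYPOINT, CMD and LABEL respectively.
func applyRuntimeConfig(b *buildah.Builder) {
	b.SetEnv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
	b.SetWorkDir("/app")
	b.SetEntrypoint([]string{"/app/entrypoint.sh"})
	b.SetCmd([]string{"serve"})
	b.SetLabel("org.opencontainers.image.vendor", "sealer")
}

Because every setter copies its input, callers can reuse or mutate the slices they pass in without affecting the builder.)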
+func (b *Builder) UnsetLabel(k string) {
+ delete(b.OCIv1.Config.Labels, k)
+ delete(b.Docker.Config.Labels, k)
+}
+
+// ClearLabels removes all keys and their values from the image's runtime
+// configuration.
+func (b *Builder) ClearLabels() {
+ b.OCIv1.Config.Labels = map[string]string{}
+ b.Docker.Config.Labels = map[string]string{}
+}
+
+// Ports returns the set of ports which should be exposed when a container
+// based on an image built from this container is run.
+func (b *Builder) Ports() []string {
+ p := []string{}
+ for k := range b.OCIv1.Config.ExposedPorts {
+ p = append(p, k)
+ }
+ return p
+}
+
+// SetPort adds or overwrites an exported port in the set of ports which should
+// be exposed when a container based on an image built from this container is
+// run.
+func (b *Builder) SetPort(p string) {
+ if b.OCIv1.Config.ExposedPorts == nil {
+ b.OCIv1.Config.ExposedPorts = map[string]struct{}{}
+ }
+ b.OCIv1.Config.ExposedPorts[p] = struct{}{}
+ if b.Docker.Config.ExposedPorts == nil {
+ b.Docker.Config.ExposedPorts = make(docker.PortSet)
+ }
+ b.Docker.Config.ExposedPorts[docker.Port(p)] = struct{}{}
+}
+
+// UnsetPort removes an exposed port from the set of ports which should be
+// exposed when a container based on an image built from this container is run.
+func (b *Builder) UnsetPort(p string) {
+ delete(b.OCIv1.Config.ExposedPorts, p)
+ delete(b.Docker.Config.ExposedPorts, docker.Port(p))
+}
+
+// ClearPorts empties the set of ports which should be exposed when a container
+// based on an image built from this container is run.
+func (b *Builder) ClearPorts() {
+ b.OCIv1.Config.ExposedPorts = map[string]struct{}{}
+ b.Docker.Config.ExposedPorts = docker.PortSet{}
+}
+
+// Volumes returns a list of filesystem locations which should be mounted from
+// outside of the container when a container built from an image built from
+// this container is run.
+func (b *Builder) Volumes() []string {
+ v := []string{}
+ for k := range b.OCIv1.Config.Volumes {
+ v = append(v, k)
+ }
+ if len(v) > 0 {
+ return v
+ }
+ return nil
+}
+
+// CheckVolume returns true if the location exists in the image's list of locations
+// which should be mounted from outside of the container when a container
+// based on an image built from this container is run.
+func (b *Builder) CheckVolume(v string) bool {
+ _, OCIv1Volume := b.OCIv1.Config.Volumes[v]
+ _, DockerVolume := b.Docker.Config.Volumes[v]
+ return OCIv1Volume || DockerVolume
+}
+
+// AddVolume adds a location to the image's list of locations which should be
+// mounted from outside of the container when a container based on an image
+// built from this container is run.
+func (b *Builder) AddVolume(v string) {
+ if b.OCIv1.Config.Volumes == nil {
+ b.OCIv1.Config.Volumes = map[string]struct{}{}
+ }
+ b.OCIv1.Config.Volumes[v] = struct{}{}
+ if b.Docker.Config.Volumes == nil {
+ b.Docker.Config.Volumes = map[string]struct{}{}
+ }
+ b.Docker.Config.Volumes[v] = struct{}{}
+}
+
+// RemoveVolume removes a location from the list of locations which should be
+// mounted from outside of the container when a container based on an image
+// built from this container is run.
+func (b *Builder) RemoveVolume(v string) {
+ delete(b.OCIv1.Config.Volumes, v)
+ delete(b.Docker.Config.Volumes, v)
+}
+
+// ClearVolumes removes all locations from the image's list of locations which
+// should be mounted from outside of the container when a container based on an
+// image built from this container is run. 
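(Ports and volumes follow the same dual-bookkeeping pattern; note that SetPort stores the raw string in the OCI map but a typed docker.Port in the Docker map. A small illustrative sketch, not part of the patch, where b is an assumed *buildah.Builder:

package example

import "github.com/containers/buildah"

func exposeAndMount(b *buildah.Builder) {
	b.SetPort("8080/tcp") // same "port/proto" form Docker uses for EXPOSE entries
	b.AddVolume("/var/lib/data")
	if !b.CheckVolume("/var/lib/data") {
		// CheckVolume consults both config views, so this branch is
		// unreachable immediately after AddVolume.
		panic("volume not registered")
	}
})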
+func (b *Builder) ClearVolumes() { + b.OCIv1.Config.Volumes = map[string]struct{}{} + b.Docker.Config.Volumes = map[string]struct{}{} +} + +// Hostname returns the hostname which will be set in the container and in +// containers built using images built from the container. +func (b *Builder) Hostname() string { + return b.Docker.Config.Hostname +} + +// SetHostname sets the hostname which will be set in the container and in +// containers built using images built from the container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetHostname(name string) { + b.Docker.Config.Hostname = name +} + +// Domainname returns the domainname which will be set in the container and in +// containers built using images built from the container. +func (b *Builder) Domainname() string { + return b.Docker.Config.Domainname +} + +// SetDomainname sets the domainname which will be set in the container and in +// containers built using images built from the container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetDomainname(name string) { + if name != "" && b.Format != define.Dockerv2ImageManifest { + b.Logger.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) + } + b.Docker.Config.Domainname = name +} + +// SetDefaultMountsFilePath sets the mounts file path for testing purposes +func (b *Builder) SetDefaultMountsFilePath(path string) { + b.DefaultMountsFilePath = path +} + +// Comment returns the comment which will be set in the container and in +// containers built using images built from the container +func (b *Builder) Comment() string { + return b.Docker.Comment +} + +// SetComment sets the comment which will be set in the container and in +// containers built using images built from the container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetComment(comment string) { + if comment != "" && b.Format != define.Dockerv2ImageManifest { + logrus.Warnf("COMMENT is not supported for OCI image format, comment %s will be ignored. Must use `docker` format", comment) + } + b.Docker.Comment = comment +} + +// HistoryComment returns the comment which will be used in the history item +// which will describe the latest layer when we commit an image. +func (b *Builder) HistoryComment() string { + return b.ImageHistoryComment +} + +// SetHistoryComment sets the comment which will be used in the history item +// which will describe the latest layer when we commit an image. +func (b *Builder) SetHistoryComment(comment string) { + b.ImageHistoryComment = comment +} + +// StopSignal returns the signal which will be set in the container and in +// containers built using images built from the container +func (b *Builder) StopSignal() string { + return b.Docker.Config.StopSignal +} + +// SetStopSignal sets the signal which will be set in the container and in +// containers built using images built from the container. +func (b *Builder) SetStopSignal(stopSignal string) { + b.OCIv1.Config.StopSignal = stopSignal + b.Docker.Config.StopSignal = stopSignal +} + +// Healthcheck returns information that recommends how a container engine +// should check if a running container is "healthy". 
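(Several of the settings above, such as hostname, domainname and comment, exist only in the Docker v2 image format and are dropped when committing OCI images, hence the Warnf calls. A short sketch of the distinction, illustrative only, with b an assumed *buildah.Builder:

package example

import "github.com/containers/buildah"

func formatSensitiveConfig(b *buildah.Builder) {
	b.SetStopSignal("SIGTERM")       // recorded in both the OCI and Docker configs
	b.SetHostname("builder")         // Docker-format only; OCI commits discard it
	b.SetDomainname("cluster.local") // Docker-format only; warns unless Format is Dockerv2ImageManifest
	b.SetComment("built by sealer")  // Docker-format only; warns likewise
})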
+func (b *Builder) Healthcheck() *docker.HealthConfig { + if b.Docker.Config.Healthcheck == nil { + return nil + } + return &docker.HealthConfig{ + Test: copyStringSlice(b.Docker.Config.Healthcheck.Test), + Interval: b.Docker.Config.Healthcheck.Interval, + Timeout: b.Docker.Config.Healthcheck.Timeout, + StartPeriod: b.Docker.Config.Healthcheck.StartPeriod, + Retries: b.Docker.Config.Healthcheck.Retries, + } +} + +// SetHealthcheck sets recommended commands to run in order to verify that a +// running container based on this image is "healthy", along with information +// specifying how often that test should be run, and how many times the test +// should fail before the container should be considered unhealthy. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetHealthcheck(config *docker.HealthConfig) { + b.Docker.Config.Healthcheck = nil + if config != nil { + if b.Format != define.Dockerv2ImageManifest { + b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. Must use `docker` format") + } + b.Docker.Config.Healthcheck = &docker.HealthConfig{ + Test: copyStringSlice(config.Test), + Interval: config.Interval, + Timeout: config.Timeout, + StartPeriod: config.StartPeriod, + Retries: config.Retries, + } + } +} + +// AddPrependedEmptyLayer adds an item to the history that we'll create when +// committing the image, after any history we inherit from the base image, but +// before the history item that we'll use to describe the new layer that we're +// adding. +func (b *Builder) AddPrependedEmptyLayer(created *time.Time, createdBy, author, comment string) { + if created != nil { + copiedTimestamp := *created + created = &copiedTimestamp + } + b.PrependedEmptyLayers = append(b.PrependedEmptyLayers, ociv1.History{ + Created: created, + CreatedBy: createdBy, + Author: author, + Comment: comment, + EmptyLayer: true, + }) +} + +// ClearPrependedEmptyLayers clears the list of history entries that we'll add +// to the committed image before the entry for the layer that we're adding. +func (b *Builder) ClearPrependedEmptyLayers() { + b.PrependedEmptyLayers = nil +} + +// AddAppendedEmptyLayer adds an item to the history that we'll create when +// committing the image, after the history item that we'll use to describe the +// new layer that we're adding. +func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, comment string) { + if created != nil { + copiedTimestamp := *created + created = &copiedTimestamp + } + b.AppendedEmptyLayers = append(b.AppendedEmptyLayers, ociv1.History{ + Created: created, + CreatedBy: createdBy, + Author: author, + Comment: comment, + EmptyLayer: true, + }) +} + +// ClearAppendedEmptyLayers clears the list of history entries that we'll add +// to the committed image after the entry for the layer that we're adding. 
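(SetHealthcheck below copies the config rather than aliasing it, so later mutation of the caller's HealthConfig cannot leak into the builder. A usage sketch, not part of the patch; the probe command is an assumption, and docker refers to the vendored github.com/containers/buildah/docker package:

package example

import (
	"time"

	"github.com/containers/buildah"
	"github.com/containers/buildah/docker"
)

func addHealthcheck(b *buildah.Builder) {
	// HEALTHCHECK is Docker-format only, like SHELL and ONBUILD above.
	b.SetHealthcheck(&docker.HealthConfig{
		Test:     []string{"CMD-SHELL", "curl -fsS http://localhost:8080/healthz || exit 1"},
		Interval: 30 * time.Second,
		Timeout:  5 * time.Second,
		Retries:  3,
	})
})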
+func (b *Builder) ClearAppendedEmptyLayers() { + b.AppendedEmptyLayers = nil +} diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go new file mode 100644 index 00000000000..49f2c55eb27 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -0,0 +1,1897 @@ +package copier + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containers/buildah/util" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + copierCommand = "buildah-copier" + maxLoopsFollowed = 64 + // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar + cISUID = 04000 // Set uid, from archive/tar + cISGID = 02000 // Set gid, from archive/tar + cISVTX = 01000 // Save text (sticky bit), from archive/tar +) + +func init() { + reexec.Register(copierCommand, copierMain) + // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic + // modules to handle looking up user and host information) to load modules that match the libc + // our binary is currently using. Hopefully they're loaded on first use, so that they won't + // need to be loaded after we've chrooted into the rootfs, which could include modules that + // don't match our libc and which can't be loaded, or modules which we don't want to execute + // because we don't trust their code. + _, _ = user.Lookup("buildah") + _, _ = net.LookupHost("localhost") +} + +// isArchivePath returns true if the specified path can be read like a (possibly +// compressed) tarball. +func isArchivePath(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + defer f.Close() + rc, _, err := compression.AutoDecompress(f) + if err != nil { + return false + } + defer rc.Close() + tr := tar.NewReader(rc) + _, err = tr.Next() + return err == nil +} + +// requestType encodes exactly what kind of request this is. +type requestType string + +const ( + requestEval requestType = "EVAL" + requestStat requestType = "STAT" + requestGet requestType = "GET" + requestPut requestType = "PUT" + requestMkdir requestType = "MKDIR" + requestRemove requestType = "REMOVE" + requestQuit requestType = "QUIT" +) + +// Request encodes a single request. 
+type request struct { + Request requestType + Root string // used by all requests + preservedRoot string + rootPrefix string // used to reconstruct paths being handed back to the caller + Directory string // used by all requests + preservedDirectory string + Globs []string `json:",omitempty"` // used by stat, get + preservedGlobs []string + StatOptions StatOptions `json:",omitempty"` + GetOptions GetOptions `json:",omitempty"` + PutOptions PutOptions `json:",omitempty"` + MkdirOptions MkdirOptions `json:",omitempty"` + RemoveOptions RemoveOptions `json:",omitempty"` +} + +func (req *request) Excludes() []string { + switch req.Request { + case requestEval: + return nil + case requestStat: + return req.StatOptions.Excludes + case requestGet: + return req.GetOptions.Excludes + case requestPut: + return nil + case requestMkdir: + return nil + case requestRemove: + return nil + case requestQuit: + return nil + default: + panic(fmt.Sprintf("not an implemented request type: %q", req.Request)) + } +} + +func (req *request) UIDMap() []idtools.IDMap { + switch req.Request { + case requestEval: + return nil + case requestStat: + return nil + case requestGet: + return req.GetOptions.UIDMap + case requestPut: + return req.PutOptions.UIDMap + case requestMkdir: + return req.MkdirOptions.UIDMap + case requestRemove: + return nil + case requestQuit: + return nil + default: + panic(fmt.Sprintf("not an implemented request type: %q", req.Request)) + } +} + +func (req *request) GIDMap() []idtools.IDMap { + switch req.Request { + case requestEval: + return nil + case requestStat: + return nil + case requestGet: + return req.GetOptions.GIDMap + case requestPut: + return req.PutOptions.GIDMap + case requestMkdir: + return req.MkdirOptions.GIDMap + case requestRemove: + return nil + case requestQuit: + return nil + default: + panic(fmt.Sprintf("not an implemented request type: %q", req.Request)) + } +} + +// Response encodes a single response. +type response struct { + Error string `json:",omitempty"` + Stat statResponse `json:",omitempty"` + Eval evalResponse `json:",omitempty"` + Get getResponse `json:",omitempty"` + Put putResponse `json:",omitempty"` + Mkdir mkdirResponse `json:",omitempty"` + Remove removeResponse `json:",omitempty"` +} + +// statResponse encodes a response for a single Stat request. +type statResponse struct { + Globs []*StatsForGlob +} + +// evalResponse encodes a response for a single Eval request. +type evalResponse struct { + Evaluated string +} + +// StatsForGlob encode results for a single glob pattern passed to Stat(). +type StatsForGlob struct { + Error string `json:",omitempty"` // error if the Glob pattern was malformed + Glob string // input pattern to which this result corresponds + Globbed []string // a slice of zero or more names that match the glob + Results map[string]*StatForItem // one for each Globbed value if there are any, or for Glob +} + +// StatForItem encode results for a single filesystem item, as returned by Stat(). +type StatForItem struct { + Error string `json:",omitempty"` + Name string + Size int64 // dereferenced value for symlinks + Mode os.FileMode // dereferenced value for symlinks + ModTime time.Time // dereferenced value for symlinks + IsSymlink bool + IsDir bool // dereferenced value for symlinks + IsRegular bool // dereferenced value for symlinks + IsArchive bool // dereferenced value for symlinks + ImmediateTarget string `json:",omitempty"` // raw link content +} + +// getResponse encodes a response for a single Get request. 
+type getResponse struct { +} + +// putResponse encodes a response for a single Put request. +type putResponse struct { +} + +// mkdirResponse encodes a response for a single Mkdir request. +type mkdirResponse struct { +} + +// removeResponse encodes a response for a single Remove request. +type removeResponse struct { +} + +// EvalOptions controls parts of Eval()'s behavior. +type EvalOptions struct { +} + +// Eval evaluates the directory's path, including any intermediate symbolic +// links. +// If root is specified and the current OS supports it, and the calling process +// has the necessary privileges, evaluation is performed in a chrooted context. +// If the directory is specified as an absolute path, it should either be the +// root directory or a subdirectory of the root directory. Otherwise, the +// directory is treated as a path relative to the root directory. +func Eval(root string, directory string, options EvalOptions) (string, error) { + req := request{ + Request: requestEval, + Root: root, + Directory: directory, + } + resp, err := copier(nil, nil, req) + if err != nil { + return "", err + } + if resp.Error != "" { + return "", errors.New(resp.Error) + } + return resp.Eval.Evaluated, nil +} + +// StatOptions controls parts of Stat()'s behavior. +type StatOptions struct { + CheckForArchives bool // check for and populate the IsArchive bit in returned values + Excludes []string // contents to pretend don't exist, using the OS-specific path separator +} + +// Stat globs the specified pattern in the specified directory and returns its +// results. +// If root and directory are both not specified, the current root directory is +// used, and relative names in the globs list are treated as being relative to +// the current working directory. +// If root is specified and the current OS supports it, and the calling process +// has the necessary privileges, the stat() is performed in a chrooted context. +// If the directory is specified as an absolute path, it should either be the +// root directory or a subdirectory of the root directory. Otherwise, the +// directory is treated as a path relative to the root directory. +// Relative names in the glob list are treated as being relative to the +// directory. +func Stat(root string, directory string, options StatOptions, globs []string) ([]*StatsForGlob, error) { + req := request{ + Request: requestStat, + Root: root, + Directory: directory, + Globs: append([]string{}, globs...), + StatOptions: options, + } + resp, err := copier(nil, nil, req) + if err != nil { + return nil, err + } + if resp.Error != "" { + return nil, errors.New(resp.Error) + } + return resp.Stat.Globs, nil +} + +// GetOptions controls parts of Get()'s behavior. +type GetOptions struct { + UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the output archive + Excludes []string // contents to pretend don't exist, using the OS-specific path separator + ExpandArchives bool // extract the contents of named items that are archives + ChownDirs *idtools.IDPair // set ownership on directories. no effect on archives being extracted + ChmodDirs *os.FileMode // set permissions on directories. no effect on archives being extracted + ChownFiles *idtools.IDPair // set ownership of files. no effect on archives being extracted + ChmodFiles *os.FileMode // set permissions on files. no effect on archives being extracted + StripSetuidBit bool // strip the setuid bit off of items being copied. 
no effect on archives being extracted + StripSetgidBit bool // strip the setgid bit off of items being copied. no effect on archives being extracted + StripStickyBit bool // strip the sticky bit off of items being copied. no effect on archives being extracted + StripXattrs bool // don't record extended attributes of items being copied. no effect on archives being extracted + KeepDirectoryNames bool // don't strip the top directory's basename from the paths of items in subdirectories + Rename map[string]string // rename items with the specified names, or under the specified names + NoDerefSymlinks bool // don't follow symlinks when globs match them + IgnoreUnreadable bool // ignore errors reading items, instead of returning an error + NoCrossDevice bool // if a subdirectory is a mountpoint with a different device number, include it but skip its contents +} + +// Get produces an archive containing items that match the specified glob +// patterns and writes it to bulkWriter. +// If root and directory are both not specified, the current root directory is +// used, and relative names in the globs list are treated as being relative to +// the current working directory. +// If root is specified and the current OS supports it, and the calling process +// has the necessary privileges, the contents are read in a chrooted context. +// If the directory is specified as an absolute path, it should either be the +// root directory or a subdirectory of the root directory. Otherwise, the +// directory is treated as a path relative to the root directory. +// Relative names in the glob list are treated as being relative to the +// directory. +func Get(root string, directory string, options GetOptions, globs []string, bulkWriter io.Writer) error { + req := request{ + Request: requestGet, + Root: root, + Directory: directory, + Globs: append([]string{}, globs...), + StatOptions: StatOptions{ + CheckForArchives: options.ExpandArchives, + }, + GetOptions: options, + } + resp, err := copier(nil, bulkWriter, req) + if err != nil { + return err + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +// PutOptions controls parts of Put()'s behavior. +type PutOptions struct { + UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when writing contents to disk + DefaultDirOwner *idtools.IDPair // set ownership of implicitly-created directories, default is ChownDirs, or 0:0 if ChownDirs not set + DefaultDirMode *os.FileMode // set permissions on implicitly-created directories, default is ChmodDirs, or 0755 if ChmodDirs not set + ChownDirs *idtools.IDPair // set ownership of newly-created directories + ChmodDirs *os.FileMode // set permissions on newly-created directories + ChownFiles *idtools.IDPair // set ownership of newly-created files + ChmodFiles *os.FileMode // set permissions on newly-created files + StripXattrs bool // don't bother trying to set extended attributes of items being copied + IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes + IgnoreDevices bool // ignore items which are character or block devices + NoOverwriteDirNonDir bool // instead of quietly overwriting directories with non-directories, return an error + Rename map[string]string // rename items with the specified names, or under the specified names +} + +// Put extracts an archive from the bulkReader at the specified directory. +// If root and directory are both not specified, the current root directory is +// used. 
+// If root is specified and the current OS supports it, and the calling process +// has the necessary privileges, the contents are written in a chrooted +// context. If the directory is specified as an absolute path, it should +// either be the root directory or a subdirectory of the root directory. +// Otherwise, the directory is treated as a path relative to the root +// directory. +func Put(root string, directory string, options PutOptions, bulkReader io.Reader) error { + req := request{ + Request: requestPut, + Root: root, + Directory: directory, + PutOptions: options, + } + resp, err := copier(bulkReader, nil, req) + if err != nil { + return err + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +// MkdirOptions controls parts of Mkdir()'s behavior. +type MkdirOptions struct { + UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when creating directories + ChownNew *idtools.IDPair // set ownership of newly-created directories + ChmodNew *os.FileMode // set permissions on newly-created directories +} + +// Mkdir ensures that the specified directory exists. Any directories which +// need to be created will be given the specified ownership and permissions. +// If root and directory are both not specified, the current root directory is +// used. +// If root is specified and the current OS supports it, and the calling process +// has the necessary privileges, the directory is created in a chrooted +// context. If the directory is specified as an absolute path, it should +// either be the root directory or a subdirectory of the root directory. +// Otherwise, the directory is treated as a path relative to the root +// directory. +func Mkdir(root string, directory string, options MkdirOptions) error { + req := request{ + Request: requestMkdir, + Root: root, + Directory: directory, + MkdirOptions: options, + } + resp, err := copier(nil, nil, req) + if err != nil { + return err + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +// RemoveOptions controls parts of Remove()'s behavior. +type RemoveOptions struct { + All bool // if Directory is a directory, remove its contents as well +} + +// Remove removes the specified directory or item, traversing any intermediate +// symbolic links. +// If the root directory is not specified, the current root directory is used. +// If root is specified and the current OS supports it, and the calling process +// has the necessary privileges, the remove() is performed in a chrooted context. +// If the item to remove is specified as an absolute path, it should either be +// in the root directory or in a subdirectory of the root directory. Otherwise, +// the directory is treated as a path relative to the root directory. +func Remove(root string, item string, options RemoveOptions) error { + req := request{ + Request: requestRemove, + Root: root, + Directory: item, + RemoveOptions: options, + } + resp, err := copier(nil, nil, req) + if err != nil { + return err + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +// cleanerReldirectory resolves relative path candidate lexically, attempting +// to ensure that when joined as a subdirectory of another directory, it does +// not reference anything outside of that other directory. +func cleanerReldirectory(candidate string) string { + cleaned := strings.TrimPrefix(filepath.Clean(string(os.PathSeparator)+candidate), string(os.PathSeparator)) + if cleaned == "" { + return "." 
+ }
+ return cleaned
+}
+
+// convertToRelSubdirectory returns the path of directory, bound and relative to
+// root, as a relative path, or an error if that path can't be computed or if
+// the two directories are on different volumes.
+func convertToRelSubdirectory(root, directory string) (relative string, err error) {
+ if root == "" || !filepath.IsAbs(root) {
+ return "", errors.Errorf("expected root directory to be an absolute path, got %q", root)
+ }
+ if directory == "" || !filepath.IsAbs(directory) {
+ return "", errors.Errorf("expected directory to be an absolute path, got %q", directory)
+ }
+ if filepath.VolumeName(root) != filepath.VolumeName(directory) {
+ return "", errors.Errorf("%q and %q are on different volumes", root, directory)
+ }
+ rel, err := filepath.Rel(root, directory)
+ if err != nil {
+ return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root)
+ }
+ return cleanerReldirectory(rel), nil
+}
+
+func currentVolumeRoot() (string, error) {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting current working directory")
+ }
+ return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
+}
+
+func isVolumeRoot(candidate string) (bool, error) {
+ abs, err := filepath.Abs(candidate)
+ if err != nil {
+ return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate)
+ }
+ return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
+}
+
+func looksLikeAbs(candidate string) bool {
+ return candidate[0] == os.PathSeparator && (len(candidate) == 1 || candidate[1] != os.PathSeparator)
+}
+
+func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+ if req.Directory == "" {
+ if req.Root == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting current working directory")
+ }
+ req.Directory = wd
+ } else {
+ req.Directory = req.Root
+ }
+ }
+ if req.Root == "" {
+ root, err := currentVolumeRoot()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error determining root of current volume")
+ }
+ req.Root = root
+ }
+ if filepath.IsAbs(req.Directory) {
+ _, err := convertToRelSubdirectory(req.Root, req.Directory)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root)
+ }
+ }
+ isAlreadyRoot, err := isVolumeRoot(req.Root)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root)
+ }
+ if !isAlreadyRoot && canChroot {
+ return copierWithSubprocess(bulkReader, bulkWriter, req)
+ }
+ return copierWithoutSubprocess(bulkReader, bulkWriter, req)
+}
+
+func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+ req.preservedRoot = req.Root
+ req.rootPrefix = string(os.PathSeparator)
+ req.preservedDirectory = req.Directory
+ req.preservedGlobs = append([]string{}, req.Globs...)
+ if !filepath.IsAbs(req.Directory) { + req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory)) + } + absoluteGlobs := make([]string, 0, len(req.Globs)) + for _, glob := range req.preservedGlobs { + if filepath.IsAbs(glob) { + relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob) + if err != nil { + fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err) + os.Exit(1) + } + absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Root, string(os.PathSeparator)+relativeGlob)) + } else { + absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(glob))) + } + } + req.Globs = absoluteGlobs + resp, cb, err := copierHandler(bulkReader, bulkWriter, req) + if err != nil { + return nil, err + } + if cb != nil { + if err = cb(); err != nil { + return nil, err + } + } + return resp, nil +} + +func closeIfNotNilYet(f **os.File, what string) { + if f != nil && *f != nil { + err := (*f).Close() + *f = nil + if err != nil { + logrus.Debugf("error closing %s: %v", what, err) + } + } +} + +func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (resp *response, err error) { + if bulkReader == nil { + bulkReader = bytes.NewReader([]byte{}) + } + if bulkWriter == nil { + bulkWriter = ioutil.Discard + } + cmd := reexec.Command(copierCommand) + stdinRead, stdinWrite, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "pipe") + } + defer closeIfNotNilYet(&stdinRead, "stdin pipe reader") + defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer") + encoder := json.NewEncoder(stdinWrite) + stdoutRead, stdoutWrite, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "pipe") + } + defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader") + defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer") + decoder := json.NewDecoder(stdoutRead) + bulkReaderRead, bulkReaderWrite, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "pipe") + } + defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end") + defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end") + bulkWriterRead, bulkWriterWrite, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "pipe") + } + defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end") + defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end") + cmd.Dir = "/" + cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...) 
+ + errorBuffer := bytes.Buffer{} + cmd.Stdin = stdinRead + cmd.Stdout = stdoutWrite + cmd.Stderr = &errorBuffer + cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite} + if err = cmd.Start(); err != nil { + return nil, errors.Wrapf(err, "error starting subprocess") + } + cmdToWaitFor := cmd + defer func() { + if cmdToWaitFor != nil { + if err := cmdToWaitFor.Wait(); err != nil { + if errorBuffer.String() != "" { + logrus.Debug(errorBuffer.String()) + } + } + } + }() + stdinRead.Close() + stdinRead = nil + stdoutWrite.Close() + stdoutWrite = nil + bulkReaderRead.Close() + bulkReaderRead = nil + bulkWriterWrite.Close() + bulkWriterWrite = nil + killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam + if err2 := cmd.Process.Kill(); err2 != nil { + return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step) + } + return nil, errors.Wrap(err, step) + } + if err = encoder.Encode(req); err != nil { + return killAndReturn(err, "error encoding request for copier subprocess") + } + if err = decoder.Decode(&resp); err != nil { + if errors.Is(err, io.EOF) && errorBuffer.Len() > 0 { + return killAndReturn(errors.New(errorBuffer.String()), "error in copier subprocess") + } + return killAndReturn(err, "error decoding response from copier subprocess") + } + if err = encoder.Encode(&request{Request: requestQuit}); err != nil { + return killAndReturn(err, "error encoding request for copier subprocess") + } + stdinWrite.Close() + stdinWrite = nil + stdoutRead.Close() + stdoutRead = nil + var wg sync.WaitGroup + var readError, writeError error + wg.Add(1) + go func() { + _, writeError = io.Copy(bulkWriter, bulkWriterRead) + bulkWriterRead.Close() + bulkWriterRead = nil + wg.Done() + }() + wg.Add(1) + go func() { + _, readError = io.Copy(bulkReaderWrite, bulkReader) + bulkReaderWrite.Close() + bulkReaderWrite = nil + wg.Done() + }() + wg.Wait() + cmdToWaitFor = nil + if err = cmd.Wait(); err != nil { + if errorBuffer.String() != "" { + err = fmt.Errorf("%s", errorBuffer.String()) + } + return nil, err + } + if cmd.ProcessState.Exited() && !cmd.ProcessState.Success() { + err = fmt.Errorf("subprocess exited with error") + if errorBuffer.String() != "" { + err = fmt.Errorf("%s", errorBuffer.String()) + } + return nil, err + } + loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n") + if len(loggedOutput) > 0 { + for _, output := range strings.Split(loggedOutput, "\n") { + logrus.Debug(output) + } + } + if readError != nil { + return nil, errors.Wrapf(readError, "error passing bulk input to subprocess") + } + if writeError != nil { + return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess") + } + return resp, nil +} + +func copierMain() { + var chrooted bool + decoder := json.NewDecoder(os.Stdin) + encoder := json.NewEncoder(os.Stdout) + previousRequestRoot := "" + + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + } + + // Set up descriptors for receiving and sending tarstreams. + bulkReader := os.NewFile(3, "bulk-reader") + bulkWriter := os.NewFile(4, "bulk-writer") + + for { + // Read a request. 
+ req := new(request) + if err := decoder.Decode(req); err != nil { + fmt.Fprintf(os.Stderr, "error decoding request from copier parent process: %v", err) + os.Exit(1) + } + if req.Request == requestQuit { + // Making Quit a specific request means that we could + // run Stat() at a caller's behest before using the + // same process for Get() or Put(). Maybe later. + break + } + + // Multiple requests should list the same root, because we + // can't un-chroot to chroot to some other location. + if previousRequestRoot != "" { + // Check that we got the same input value for + // where-to-chroot-to. + if req.Root != previousRequestRoot { + fmt.Fprintf(os.Stderr, "error: can't change location of chroot from %q to %q", previousRequestRoot, req.Root) + os.Exit(1) + } + previousRequestRoot = req.Root + } else { + // Figure out where to chroot to, if we weren't told. + if req.Root == "" { + root, err := currentVolumeRoot() + if err != nil { + fmt.Fprintf(os.Stderr, "error determining root of current volume: %v", err) + os.Exit(1) + } + req.Root = root + } + // Change to the specified root directory. + var err error + chrooted, err = chroot(req.Root) + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + os.Exit(1) + } + } + + req.preservedRoot = req.Root + req.rootPrefix = string(os.PathSeparator) + req.preservedDirectory = req.Directory + req.preservedGlobs = append([]string{}, req.Globs...) + if chrooted { + // We'll need to adjust some things now that the root + // directory isn't what it was. Make the directory and + // globs absolute paths for simplicity's sake. + absoluteDirectory := req.Directory + if !filepath.IsAbs(req.Directory) { + absoluteDirectory = filepath.Join(req.Root, cleanerReldirectory(req.Directory)) + } + relativeDirectory, err := convertToRelSubdirectory(req.preservedRoot, absoluteDirectory) + if err != nil { + fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", absoluteDirectory, req.preservedRoot, err) + os.Exit(1) + } + req.Directory = filepath.Clean(string(os.PathSeparator) + relativeDirectory) + absoluteGlobs := make([]string, 0, len(req.Globs)) + for i, glob := range req.preservedGlobs { + if filepath.IsAbs(glob) { + relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob) + if err != nil { + fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err) + os.Exit(1) + } + absoluteGlobs = append(absoluteGlobs, filepath.Clean(string(os.PathSeparator)+relativeGlob)) + } else { + absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i]))) + } + } + req.Globs = absoluteGlobs + req.rootPrefix = req.Root + req.Root = string(os.PathSeparator) + } else { + // Make the directory and globs absolute paths for + // simplicity's sake. + if !filepath.IsAbs(req.Directory) { + req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory)) + } + absoluteGlobs := make([]string, 0, len(req.Globs)) + for i, glob := range req.preservedGlobs { + if filepath.IsAbs(glob) { + absoluteGlobs = append(absoluteGlobs, req.Globs[i]) + } else { + absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i]))) + } + } + req.Globs = absoluteGlobs + } + resp, cb, err := copierHandler(bulkReader, bulkWriter, *req) + if err != nil { + fmt.Fprintf(os.Stderr, "error handling request %#v from copier parent process: %v", *req, err) + os.Exit(1) + } + // Encode the response. 
+ if err := encoder.Encode(resp); err != nil { + fmt.Fprintf(os.Stderr, "error encoding response %#v for copier parent process: %v", *req, err) + os.Exit(1) + } + // If there's bulk data to transfer, run the callback to either + // read or write it. + if cb != nil { + if err = cb(); err != nil { + fmt.Fprintf(os.Stderr, "error during bulk transfer for %#v: %v", *req, err) + os.Exit(1) + } + } + } +} + +func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, func() error, error) { + // NewPatternMatcher splits patterns into components using + // os.PathSeparator, implying that it expects OS-specific naming + // conventions. + excludes := req.Excludes() + pm, err := fileutils.NewPatternMatcher(excludes) + if err != nil { + return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes) + } + + var idMappings *idtools.IDMappings + uidMap, gidMap := req.UIDMap(), req.GIDMap() + if len(uidMap) > 0 && len(gidMap) > 0 { + idMappings = idtools.NewIDMappingsFromMaps(uidMap, gidMap) + } + + switch req.Request { + default: + return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request) + case requestEval: + resp := copierHandlerEval(req) + return resp, nil, nil + case requestStat: + resp := copierHandlerStat(req, pm) + return resp, nil, nil + case requestGet: + return copierHandlerGet(bulkWriter, req, pm, idMappings) + case requestPut: + return copierHandlerPut(bulkReader, req, idMappings) + case requestMkdir: + return copierHandlerMkdir(req, idMappings) + case requestRemove: + resp := copierHandlerRemove(req) + return resp, nil, nil + case requestQuit: + return nil, nil, nil + } +} + +// pathIsExcluded computes path relative to root, then asks the pattern matcher +// if the result is excluded. Returns the relative path and the matcher's +// results. +func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) { + rel, err := convertToRelSubdirectory(root, path) + if err != nil { + return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root) + } + if pm == nil { + return rel, false, nil + } + if rel == "." { + // special case + return rel, false, nil + } + // Matches uses filepath.FromSlash() to convert candidates before + // checking if they match the patterns it's been given, implying that + // it expects Unix-style paths. + matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck + if err != nil { + return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel) + } + if matches { + return rel, true, nil + } + return rel, false, nil +} + +// resolvePath resolves symbolic links in paths, treating the specified +// directory as the root. +// Resolving the path this way, and using the result, is in no way secure +// against another process manipulating the content that we're looking at, and +// it is not expected to be. +// This helps us approximate chrooted behavior on systems and in test cases +// where chroot isn't available. 
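(Before the path-resolution internals below: the exported Eval/Stat/Get/Put/Mkdir/Remove calls above are the package's whole public surface, each marshaling one request into the possibly-chrooted handler. A usage sketch copying a subtree between two root filesystems; the paths and excludes are illustrative assumptions, not part of this patch:

package main

import (
	"bytes"
	"log"

	"github.com/containers/buildah/copier"
)

func main() {
	// Stream /rootfs/etc (minus *.bak files) out as a tarball. The root
	// argument is what the helper subprocess chroots into, so symlinks
	// inside the tree cannot escape it.
	var buf bytes.Buffer
	getOpts := copier.GetOptions{Excludes: []string{"*.bak"}}
	if err := copier.Get("/rootfs", "/rootfs/etc", getOpts, []string{"."}, &buf); err != nil {
		log.Fatal(err)
	}
	// Replay the same stream into another tree; missing parent
	// directories are created with default ownership.
	if err := copier.Put("/other-rootfs", "/other-rootfs/etc", copier.PutOptions{}, &buf); err != nil {
		log.Fatal(err)
	}
})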
+func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) { + rel, err := convertToRelSubdirectory(root, path) + if err != nil { + return "", errors.Errorf("error making path %q relative to %q", path, root) + } + workingPath := root + followed := 0 + components := strings.Split(rel, string(os.PathSeparator)) + excluded := false + for len(components) > 0 { + // if anything we try to examine is excluded, then resolution has to "break" + _, thisExcluded, err := pathIsExcluded(root, filepath.Join(workingPath, components[0]), pm) + if err != nil { + return "", err + } + excluded = excluded || thisExcluded + if !excluded { + if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil && !(len(components) == 1 && !evaluateFinalComponent) { + followed++ + if followed > maxLoopsFollowed { + return "", &os.PathError{ + Op: "open", + Path: path, + Err: syscall.ELOOP, + } + } + if filepath.IsAbs(target) || looksLikeAbs(target) { + // symlink to an absolute path - prepend the + // root directory to that absolute path to + // replace the current location, and resolve + // the remaining components + workingPath = root + components = append(strings.Split(target, string(os.PathSeparator)), components[1:]...) + continue + } + // symlink to a relative path - add the link target to + // the current location to get the next location, and + // resolve the remaining components + rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target)) + if err != nil { + return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root) + } + workingPath = root + components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...) 
+ continue + } + } + // append the current component's name to get the next location + workingPath = filepath.Join(workingPath, components[0]) + if workingPath == filepath.Join(root, "..") { + // attempted to go above the root using a relative path .., scope it + workingPath = root + } + // ready to handle the next component + components = components[1:] + } + return workingPath, nil +} + +func copierHandlerEval(req request) *response { + errorResponse := func(fmtspec string, args ...interface{}) *response { + return &response{Error: fmt.Sprintf(fmtspec, args...), Eval: evalResponse{}} + } + resolvedTarget, err := resolvePath(req.Root, req.Directory, true, nil) + if err != nil { + return errorResponse("copier: eval: error resolving %q: %v", req.Directory, err) + } + return &response{Eval: evalResponse{Evaluated: filepath.Join(req.rootPrefix, resolvedTarget)}} +} + +func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response { + errorResponse := func(fmtspec string, args ...interface{}) *response { + return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}} + } + if len(req.Globs) == 0 { + return errorResponse("copier: stat: expected at least one glob pattern, got none") + } + var stats []*StatsForGlob + for i, glob := range req.Globs { + s := StatsForGlob{ + Glob: req.preservedGlobs[i], + } + // glob this pattern + globMatched, err := filepath.Glob(glob) + if err != nil { + s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob) + } + + if len(globMatched) == 0 && strings.ContainsAny(glob, "*?[") { + continue + } + // collect the matches + s.Globbed = make([]string, 0, len(globMatched)) + s.Results = make(map[string]*StatForItem) + for _, globbed := range globMatched { + rel, excluded, err := pathIsExcluded(req.Root, globbed, pm) + if err != nil { + return errorResponse("copier: stat: %v", err) + } + if excluded { + continue + } + // if the glob was an absolute path, reconstruct the + // path that we should hand back for the match + var resultName string + if filepath.IsAbs(req.preservedGlobs[i]) { + resultName = filepath.Join(req.rootPrefix, globbed) + } else { + relResult := rel + if req.Directory != req.Root { + relResult, err = convertToRelSubdirectory(req.Directory, globbed) + if err != nil { + return errorResponse("copier: stat: error making %q relative to %q: %v", globbed, req.Directory, err) + } + } + resultName = relResult + } + result := StatForItem{Name: resultName} + s.Globbed = append(s.Globbed, resultName) + s.Results[resultName] = &result + // lstat the matched value + linfo, err := os.Lstat(globbed) + if err != nil { + result.Error = err.Error() + continue + } + result.Size = linfo.Size() + result.Mode = linfo.Mode() + result.ModTime = linfo.ModTime() + result.IsDir = linfo.IsDir() + result.IsRegular = result.Mode.IsRegular() + result.IsSymlink = (linfo.Mode() & os.ModeType) == os.ModeSymlink + checkForArchive := req.StatOptions.CheckForArchives + if result.IsSymlink { + // if the match was a symbolic link, read it + immediateTarget, err := os.Readlink(globbed) + if err != nil { + result.Error = err.Error() + continue + } + // record where it points, both by itself (it + // could be a relative link) and in the context + // of the chroot + result.ImmediateTarget = immediateTarget + resolvedTarget, err := resolvePath(req.Root, globbed, true, pm) + if err != nil { + return errorResponse("copier: stat: error resolving %q: %v", globbed, err) + } + // lstat the thing that we point to + info, err := 
os.Lstat(resolvedTarget) + if err != nil { + result.Error = err.Error() + continue + } + // replace IsArchive/IsDir/IsRegular with info about the target + if info.Mode().IsRegular() && req.StatOptions.CheckForArchives { + result.IsArchive = isArchivePath(resolvedTarget) + checkForArchive = false + } + result.IsDir = info.IsDir() + result.IsRegular = info.Mode().IsRegular() + } + if result.IsRegular && checkForArchive { + // we were asked to check on this, and it + // wasn't a symlink, in which case we'd have + // already checked what the link points to + result.IsArchive = isArchivePath(globbed) + } + } + // no unskipped matches -> error + if len(s.Globbed) == 0 { + s.Globbed = nil + s.Results = nil + s.Error = fmt.Sprintf("copier: stat: %q: %v", glob, syscall.ENOENT) + } + stats = append(stats, &s) + } + // no matches -> error + if len(stats) == 0 { + s := StatsForGlob{ + Error: fmt.Sprintf("copier: stat: %q: %v", req.Globs, syscall.ENOENT), + } + stats = append(stats, &s) + } + return &response{Stat: statResponse{Globs: stats}} +} + +func errorIsPermission(err error) bool { + err = errors.Cause(err) + if err == nil { + return false + } + return os.IsPermission(err) || strings.Contains(err.Error(), "permission denied") +} + +func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) { + statRequest := req + statRequest.Request = requestStat + statResponse := copierHandlerStat(req, pm) + errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) { + return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil + } + if statResponse.Error != "" { + return errorResponse("%s", statResponse.Error) + } + if len(req.Globs) == 0 { + return errorResponse("copier: get: expected at least one glob pattern, got 0") + } + // build a queue of items by globbing + var queue []string + globMatchedCount := 0 + for _, glob := range req.Globs { + globMatched, err := filepath.Glob(glob) + if err != nil { + return errorResponse("copier: get: glob %q: %v", glob, err) + } + globMatchedCount += len(globMatched) + queue = append(queue, globMatched...) + } + // no matches -> error + if len(queue) == 0 { + return errorResponse("copier: get: globs %v matched nothing (%d filtered out): %v", req.Globs, globMatchedCount, syscall.ENOENT) + } + topInfo, err := os.Stat(req.Directory) + if err != nil { + return errorResponse("copier: get: error reading info about directory %q: %v", req.Directory, err) + } + cb := func() error { + tw := tar.NewWriter(bulkWriter) + defer tw.Close() + hardlinkChecker := new(util.HardlinkChecker) + itemsCopied := 0 + for i, item := range queue { + // if we're not discarding the names of individual directories, keep track of this one + relNamePrefix := "" + if req.GetOptions.KeepDirectoryNames { + relNamePrefix = filepath.Base(item) + } + // if the named thing-to-read is a symlink, dereference it + info, err := os.Lstat(item) + if err != nil { + return errors.Wrapf(err, "copier: get: lstat %q", item) + } + // chase links. 
if we hit a dead end, we should just fail + followedLinks := 0 + const maxFollowedLinks = 16 + for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks { + path, err := os.Readlink(item) + if err != nil { + return errors.Wrapf(err, "copier: get: readlink %q(%q)", queue[i], item) + } + if filepath.IsAbs(path) || looksLikeAbs(path) { + path = filepath.Join(req.Root, path) + } else { + path = filepath.Join(filepath.Dir(item), path) + } + item = path + if _, err = convertToRelSubdirectory(req.Root, item); err != nil { + return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root) + } + if info, err = os.Lstat(item); err != nil { + return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item) + } + followedLinks++ + } + if followedLinks >= maxFollowedLinks { + return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item) + } + // evaluate excludes relative to the root directory + if info.Mode().IsDir() { + // we don't expand any of the contents that are archives + options := req.GetOptions + options.ExpandArchives = false + walkfn := func(path string, info os.FileInfo, err error) error { + if err != nil { + if options.IgnoreUnreadable && errorIsPermission(err) { + if info != nil && info.IsDir() { + return filepath.SkipDir + } + return nil + } else if os.IsNotExist(errors.Cause(err)) { + logrus.Warningf("copier: file disappeared while reading: %q", path) + return nil + } + return errors.Wrapf(err, "copier: get: error reading %q", path) + } + if info.Mode()&os.ModeType == os.ModeSocket { + logrus.Warningf("copier: skipping socket %q", info.Name()) + return nil + } + // compute the path of this item + // relative to the top-level directory, + // for the tar header + rel, relErr := convertToRelSubdirectory(item, path) + if relErr != nil { + return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item) + } + // prefix the original item's name if we're keeping it + if relNamePrefix != "" { + rel = filepath.Join(relNamePrefix, rel) + } + if rel == "" || rel == "." { + // skip the "." 
entry + return nil + } + skippedPath, skip, err := pathIsExcluded(req.Root, path, pm) + if err != nil { + return err + } + if skip { + if info.IsDir() { + // if there are no "include + // this anyway" patterns at + // all, we don't need to + // descend into this particular + // directory if it's a directory + if !pm.Exclusions() { + return filepath.SkipDir + } + // if there are exclusion + // patterns for which this + // path is a prefix, we + // need to keep descending + for _, pattern := range pm.Patterns() { + if !pattern.Exclusion() { + continue + } + spec := strings.Trim(pattern.String(), string(os.PathSeparator)) + trimmedPath := strings.Trim(skippedPath, string(os.PathSeparator)) + if strings.HasPrefix(spec+string(os.PathSeparator), trimmedPath) { + // we can't just skip over + // this directory + return nil + } + } + // there are exclusions, but + // none of them apply here + return filepath.SkipDir + } + // skip this item, but if we're + // a directory, a more specific + // but-include-this for + // something under it might + // also be in the excludes list + return nil + } + // if it's a symlink, read its target + symlinkTarget := "" + if info.Mode()&os.ModeType == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path) + } + symlinkTarget = target + } + // if it's a directory and we're staying on one device, and it's on a + // different device than the one we started from, skip its contents + var ok error + if info.Mode().IsDir() && req.GetOptions.NoCrossDevice { + if !sameDevice(topInfo, info) { + ok = filepath.SkipDir + } + } + // add the item to the outgoing tar stream + if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil { + if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) { + return ok + } else if os.IsNotExist(errors.Cause(err)) { + logrus.Warningf("copier: file disappeared while reading: %q", path) + return nil + } + return err + } + return ok + } + // walk the directory tree, checking/adding items individually + if err := filepath.Walk(item, walkfn); err != nil { + return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item) + } + itemsCopied++ + } else { + _, skip, err := pathIsExcluded(req.Root, item, pm) + if err != nil { + return err + } + if skip { + continue + } + // add the item to the outgoing tar stream. in + // cases where this was a symlink that we + // dereferenced, be sure to use the name of the + // link. 
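+ // (For illustration: if queue[i] were a symlink "/etc/localtime" that we dereferenced to + // "/usr/share/zoneinfo/UTC", the archive entry would be named "localtime" while carrying + // the target file's contents; the paths here are hypothetical.)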
+ if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil { + if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) { + continue + } + return errors.Wrapf(err, "copier: get: %q", queue[i]) + } + itemsCopied++ + } + } + if itemsCopied == 0 { + return errors.Wrapf(syscall.ENOENT, "copier: get: copied no items") + } + return nil + } + return &response{Stat: statResponse.Stat, Get: getResponse{}}, cb, nil +} + +func handleRename(rename map[string]string, name string) string { + if rename == nil { + return name + } + // header names always use '/', so use path instead of filepath to manipulate it + if directMapping, ok := rename[name]; ok { + return directMapping + } + prefix, remainder := path.Split(name) + for prefix != "" { + if mappedPrefix, ok := rename[prefix]; ok { + return path.Join(mappedPrefix, remainder) + } + if prefix[len(prefix)-1] == '/' { + prefix = prefix[:len(prefix)-1] + if mappedPrefix, ok := rename[prefix]; ok { + return path.Join(mappedPrefix, remainder) + } + } + newPrefix, middlePart := path.Split(prefix) + if newPrefix == prefix { + return name + } + prefix = newPrefix + remainder = path.Join(middlePart, remainder) + } + return name +} + +func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error { + // build the header using the name provided + hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget) + if err != nil { + return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget) + } + if name != "" { + hdr.Name = filepath.ToSlash(name) + } + if options.Rename != nil { + hdr.Name = handleRename(options.Rename, hdr.Name) + } + if options.StripSetuidBit { + hdr.Mode &^= cISUID + } + if options.StripSetgidBit { + hdr.Mode &^= cISGID + } + if options.StripStickyBit { + hdr.Mode &^= cISVTX + } + // read extended attributes + var xattrs map[string]string + if !options.StripXattrs { + xattrs, err = Lgetxattrs(contentPath) + if err != nil { + return errors.Wrapf(err, "error getting extended attributes for %q", contentPath) + } + } + hdr.Xattrs = xattrs // nolint:staticcheck + if hdr.Typeflag == tar.TypeReg { + // if it's an archive and we're extracting archives, read the + // file and spool out its contents in-line. 
(if we just + // inlined the whole file, we'd also be inlining the EOF marker + // it contains) + if options.ExpandArchives && isArchivePath(contentPath) { + f, err := os.Open(contentPath) + if err != nil { + return errors.Wrapf(err, "error opening file for reading archive contents") + } + defer f.Close() + rc, _, err := compression.AutoDecompress(f) + if err != nil { + return errors.Wrapf(err, "error decompressing %s", contentPath) + } + defer rc.Close() + tr := tar.NewReader(rc) + hdr, err := tr.Next() + for err == nil { + if options.Rename != nil { + hdr.Name = handleRename(options.Rename, hdr.Name) + } + if err = tw.WriteHeader(hdr); err != nil { + return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath) + } + if hdr.Size != 0 { + n, err := io.Copy(tw, tr) + if err != nil { + return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name) + } + if n != hdr.Size { + return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name) + } + tw.Flush() + } + hdr, err = tr.Next() + } + if err != io.EOF { + return errors.Wrapf(err, "error extracting contents of archive %s", contentPath) + } + return nil + } + // if this regular file is hard linked to something else we've + // already added, set up to output a TypeLink entry instead of + // a TypeReg entry + target := hardlinkChecker.Check(srcfi) + if target != "" { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = filepath.ToSlash(target) + hdr.Size = 0 + } else { + // note the device/inode pair for this file + hardlinkChecker.Add(srcfi, name) + } + } + // map the ownership for the archive + if idMappings != nil && !idMappings.Empty() { + hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair) + if err != nil { + return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair) + } + } + // force ownership and/or permissions, if requested + if hdr.Typeflag == tar.TypeDir { + if options.ChownDirs != nil { + hdr.Uid, hdr.Gid = options.ChownDirs.UID, options.ChownDirs.GID + } + if options.ChmodDirs != nil { + hdr.Mode = int64(*options.ChmodDirs) + } + } else { + if options.ChownFiles != nil { + hdr.Uid, hdr.Gid = options.ChownFiles.UID, options.ChownFiles.GID + } + if options.ChmodFiles != nil { + hdr.Mode = int64(*options.ChmodFiles) + } + } + var f *os.File + if hdr.Typeflag == tar.TypeReg { + // open the file first so that we don't write a header for it if we can't actually read it + f, err = os.Open(contentPath) + if err != nil { + return errors.Wrapf(err, "error opening file for adding its contents to archive") + } + defer f.Close() + } + // output the header + if err = tw.WriteHeader(hdr); err != nil { + return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name) + } + if hdr.Typeflag == tar.TypeReg { + // output the content + n, err := io.Copy(tw, f) + if err != nil { + return errors.Wrapf(err, "error copying %s", contentPath) + } + if n != hdr.Size { + return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, hdr.Size, n) + } + tw.Flush() + } + return nil +} + +func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDMappings) (*response, func() error, error) { + errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) { + return &response{Error: fmt.Sprintf(fmtspec, args...), Put: putResponse{}}, nil, nil + } + 
dirUID, dirGID, defaultDirUID, defaultDirGID := 0, 0, 0, 0 + if req.PutOptions.ChownDirs != nil { + dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID + defaultDirUID, defaultDirGID = dirUID, dirGID + } + defaultDirMode := os.FileMode(0755) + if req.PutOptions.ChmodDirs != nil { + defaultDirMode = *req.PutOptions.ChmodDirs + } + if req.PutOptions.DefaultDirOwner != nil { + defaultDirUID, defaultDirGID = req.PutOptions.DefaultDirOwner.UID, req.PutOptions.DefaultDirOwner.GID + } + if req.PutOptions.DefaultDirMode != nil { + defaultDirMode = *req.PutOptions.DefaultDirMode + } + var fileUID, fileGID *int + if req.PutOptions.ChownFiles != nil { + fileUID, fileGID = &req.PutOptions.ChownFiles.UID, &req.PutOptions.ChownFiles.GID + } + if idMappings != nil && !idMappings.Empty() { + containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID} + hostDirPair, err := idMappings.ToHost(containerDirPair) + if err != nil { + return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err) + } + dirUID, dirGID = hostDirPair.UID, hostDirPair.GID + defaultDirUID, defaultDirGID = hostDirPair.UID, hostDirPair.GID + if req.PutOptions.ChownFiles != nil { + containerFilePair := idtools.IDPair{UID: *fileUID, GID: *fileGID} + hostFilePair, err := idMappings.ToHost(containerFilePair) + if err != nil { + return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", *fileUID, *fileGID, err) + } + fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID + } + } + ensureDirectoryUnderRoot := func(directory string) error { + rel, err := convertToRelSubdirectory(req.Root, directory) + if err != nil { + return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root) + } + subdir := "" + for _, component := range strings.Split(rel, string(os.PathSeparator)) { + subdir = filepath.Join(subdir, component) + path := filepath.Join(req.Root, subdir) + if err := os.Mkdir(path, 0700); err == nil { + if err = lchown(path, defaultDirUID, defaultDirGID); err != nil { + return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID) + } + if err = os.Chmod(path, defaultDirMode); err != nil { + return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, defaultDirMode) + } + } else { + if !os.IsExist(err) { + return errors.Wrapf(err, "copier: put: error checking directory %q", path) + } + } + } + return nil + } + createFile := func(path string, tr *tar.Reader) (int64, error) { + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + if err != nil && os.IsExist(err) { + if req.PutOptions.NoOverwriteDirNonDir { + if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() { + return 0, errors.Wrapf(err, "copier: put: error creating file at %q", path) + } + } + if err = os.RemoveAll(path); err != nil { + return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path) + } + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + } + if err != nil { + return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path) + } + defer f.Close() + n, err := io.Copy(f, tr) + if err != nil { + return n, errors.Wrapf(err, "copier: put: error writing file %q", path) + } + return n, nil + } + targetDirectory, err := resolvePath(req.Root, req.Directory, true, nil) + if err != nil { + return errorResponse("copier: put: error 
resolving %q: %v", req.Directory, err) + } + info, err := os.Lstat(targetDirectory) + if err == nil { + if !info.IsDir() { + return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory) + } + } else { + if !os.IsNotExist(err) { + return errorResponse("copier: put: %s: %v", req.Directory, err) + } + if err := ensureDirectoryUnderRoot(req.Directory); err != nil { + return errorResponse("copier: put: %v", err) + } + } + cb := func() error { + type directoryAndTimes struct { + directory string + atime, mtime time.Time + } + var directoriesAndTimes []directoryAndTimes + defer func() { + for i := range directoriesAndTimes { + directoryAndTimes := directoriesAndTimes[len(directoriesAndTimes)-i-1] + if err := lutimes(false, directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime); err != nil { + logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err) + } + } + }() + ignoredItems := make(map[string]struct{}) + tr := tar.NewReader(bulkReader) + hdr, err := tr.Next() + for err == nil { + nameBeforeRenaming := hdr.Name + if len(hdr.Name) == 0 { + // no name -> ignore the entry + ignoredItems[nameBeforeRenaming] = struct{}{} + hdr, err = tr.Next() + continue + } + if req.PutOptions.Rename != nil { + hdr.Name = handleRename(req.PutOptions.Rename, hdr.Name) + } + // figure out who should own this new item + if idMappings != nil && !idMappings.Empty() { + containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + hostPair, err := idMappings.ToHost(containerPair) + if err != nil { + return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners") + } + hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID + } + if hdr.Typeflag == tar.TypeDir { + if req.PutOptions.ChownDirs != nil { + hdr.Uid, hdr.Gid = dirUID, dirGID + } + } else { + if req.PutOptions.ChownFiles != nil { + hdr.Uid, hdr.Gid = *fileUID, *fileGID + } + } + // make sure the parent directory exists, including for tar.TypeXGlobalHeader entries + // that we otherwise ignore, because that's what docker build does with them + path := filepath.Join(targetDirectory, cleanerReldirectory(filepath.FromSlash(hdr.Name))) + if err := ensureDirectoryUnderRoot(filepath.Dir(path)); err != nil { + return err + } + // figure out what the permissions should be + if hdr.Typeflag == tar.TypeDir { + if req.PutOptions.ChmodDirs != nil { + hdr.Mode = int64(*req.PutOptions.ChmodDirs) + } + } else { + if req.PutOptions.ChmodFiles != nil { + hdr.Mode = int64(*req.PutOptions.ChmodFiles) + } + } + // create the new item + devMajor := uint32(hdr.Devmajor) + devMinor := uint32(hdr.Devminor) + mode := os.FileMode(hdr.Mode) & os.ModePerm + switch hdr.Typeflag { + // no type flag for sockets + default: + return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag) + case tar.TypeReg, tar.TypeRegA: + var written int64 + written, err = createFile(path, tr) + // only check the length if there wasn't an error, which we'll + // check along with errors for other types of entries + if err == nil && written != hdr.Size { + return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size) + } + case tar.TypeLink: + var linkTarget string + if _, ignoredTarget := ignoredItems[hdr.Linkname]; ignoredTarget { + // hard link to an ignored item: skip this, too + ignoredItems[nameBeforeRenaming] = struct{}{} + goto nextHeader + } + 
if req.PutOptions.Rename != nil { + hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname) + } + if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil { + return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root) + } + if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) { + if req.PutOptions.NoOverwriteDirNonDir { + if st, err := os.Lstat(path); err == nil && st.IsDir() { + break + } + } + if err = os.RemoveAll(path); err == nil { + err = os.Link(linkTarget, path) + } + } + case tar.TypeSymlink: + // if req.PutOptions.Rename != nil { + // todo: the general solution requires resolving to an absolute path, handling + // renaming, and then possibly converting back to a relative symlink + // } + if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && os.IsExist(err) { + if req.PutOptions.NoOverwriteDirNonDir { + if st, err := os.Lstat(path); err == nil && st.IsDir() { + break + } + } + if err = os.RemoveAll(path); err == nil { + err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)) + } + } + case tar.TypeChar: + if req.PutOptions.IgnoreDevices { + ignoredItems[nameBeforeRenaming] = struct{}{} + goto nextHeader + } + if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) { + if req.PutOptions.NoOverwriteDirNonDir { + if st, err := os.Lstat(path); err == nil && st.IsDir() { + break + } + } + if err = os.RemoveAll(path); err == nil { + err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))) + } + } + case tar.TypeBlock: + if req.PutOptions.IgnoreDevices { + ignoredItems[nameBeforeRenaming] = struct{}{} + goto nextHeader + } + if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) { + if req.PutOptions.NoOverwriteDirNonDir { + if st, err := os.Lstat(path); err == nil && st.IsDir() { + break + } + } + if err = os.RemoveAll(path); err == nil { + err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))) + } + } + case tar.TypeDir: + if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) { + var st os.FileInfo + if st, err = os.Lstat(path); err == nil && !st.IsDir() { + // it's not a directory, so remove it and mkdir + if err = os.Remove(path); err == nil { + err = os.Mkdir(path, 0700) + } + } + // either we removed it and retried, or it was a directory, + // in which case we want to just add the new stuff under it + } + // make a note of the directory's times. we + // might create items under it, which will + // cause the mtime to change after we correct + // it, so we'll need to correct it again later + directoriesAndTimes = append(directoriesAndTimes, directoryAndTimes{ + directory: path, + atime: hdr.AccessTime, + mtime: hdr.ModTime, + }) + case tar.TypeFifo: + if err = mkfifo(path, 0600); err != nil && os.IsExist(err) { + if req.PutOptions.NoOverwriteDirNonDir { + if st, err := os.Lstat(path); err == nil && st.IsDir() { + break + } + } + if err = os.RemoveAll(path); err == nil { + err = mkfifo(path, 0600) + } + } + case tar.TypeXGlobalHeader: + // Per archive/tar, PAX uses these to specify key=value information + // applies to all subsequent entries. The one in reported in #2717, + // https://www.openssl.org/source/openssl-1.1.1g.tar.gz, includes a + // comment=(40 byte hex string) at the start, possibly a digest. + // Don't try to create whatever path was used for the header. 
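+ // (For example, tarballs written by "git archive" begin with a pax_global_header + // entry whose comment= holds the commit hash; it describes the archive as a whole, + // not a file that should be created on disk.)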
+ goto nextHeader + } + // check for errors + if err != nil { + return errors.Wrapf(err, "copier: put: error creating %q", path) + } + // set ownership + if err = lchown(path, hdr.Uid, hdr.Gid); err != nil { + return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid) + } + // set permissions, except for symlinks, since we don't have lchmod + if hdr.Typeflag != tar.TypeSymlink { + if err = os.Chmod(path, mode); err != nil { + return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode) + } + } + // set other bits that might have been reset by chown() + if hdr.Typeflag != tar.TypeSymlink { + if hdr.Mode&cISUID == cISUID { + mode |= syscall.S_ISUID + } + if hdr.Mode&cISGID == cISGID { + mode |= syscall.S_ISGID + } + if hdr.Mode&cISVTX == cISVTX { + mode |= syscall.S_ISVTX + } + if err = syscall.Chmod(path, uint32(mode)); err != nil { + return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode) + } + } + // set xattrs, including some that might have been reset by chown() + if !req.PutOptions.StripXattrs { + if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck + if !req.PutOptions.IgnoreXattrErrors { + return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path) + } + } + } + // set time + if hdr.AccessTime.IsZero() || hdr.AccessTime.Before(hdr.ModTime) { + hdr.AccessTime = hdr.ModTime + } + if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil { + return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime) + } + nextHeader: + hdr, err = tr.Next() + } + if err != io.EOF { + return errors.Wrapf(err, "error reading tar stream: expected EOF") + } + return nil + } + return &response{Error: "", Put: putResponse{}}, cb, nil +} + +func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) { + errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) { + return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil + } + dirUID, dirGID := 0, 0 + if req.MkdirOptions.ChownNew != nil { + dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID + } + dirMode := os.FileMode(0755) + if req.MkdirOptions.ChmodNew != nil { + dirMode = *req.MkdirOptions.ChmodNew + } + if idMappings != nil && !idMappings.Empty() { + containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID} + hostDirPair, err := idMappings.ToHost(containerDirPair) + if err != nil { + return errorResponse("copier: mkdir: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err) + } + dirUID, dirGID = hostDirPair.UID, hostDirPair.GID + } + + directory, err := resolvePath(req.Root, req.Directory, true, nil) + if err != nil { + return errorResponse("copier: mkdir: error resolving %q: %v", req.Directory, err) + } + + rel, err := convertToRelSubdirectory(req.Root, directory) + if err != nil { + return errorResponse("copier: mkdir: error computing path of %q relative to %q: %v", directory, req.Root, err) + } + + subdir := "" + for _, component := range strings.Split(rel, string(os.PathSeparator)) { + subdir = filepath.Join(subdir, component) + path := filepath.Join(req.Root, subdir) + if err := os.Mkdir(path, 0700); err == nil { + if err = chown(path, dirUID, dirGID); err != nil { + return errorResponse("copier: mkdir: error 
setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err) + } + if err = chmod(path, dirMode); err != nil { + return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode, err) + } + } else { + if !os.IsExist(err) { + return errorResponse("copier: mkdir: error checking directory %q: %v", path, err) + } + } + } + + return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil +} + +func copierHandlerRemove(req request) *response { + errorResponse := func(fmtspec string, args ...interface{}) *response { + return &response{Error: fmt.Sprintf(fmtspec, args...), Remove: removeResponse{}} + } + resolvedTarget, err := resolvePath(req.Root, req.Directory, false, nil) + if err != nil { + return errorResponse("copier: remove: %v", err) + } + if req.RemoveOptions.All { + err = os.RemoveAll(resolvedTarget) + } else { + err = os.Remove(resolvedTarget) + } + if err != nil { + return errorResponse("copier: remove %q: %v", req.Directory, err) + } + return &response{Error: "", Remove: removeResponse{}} +} diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go new file mode 100644 index 00000000000..9fc8fece383 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go @@ -0,0 +1,95 @@ +// +build !windows + +package copier + +import ( + "os" + "syscall" + "time" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +var canChroot = os.Getuid() == 0 + +func chroot(root string) (bool, error) { + if canChroot { + if err := os.Chdir(root); err != nil { + return false, errors.Wrapf(err, "error changing to intended-new-root directory %q", root) + } + if err := unix.Chroot(root); err != nil { + return false, errors.Wrapf(err, "error chrooting to directory %q", root) + } + if err := os.Chdir(string(os.PathSeparator)); err != nil { + return false, errors.Wrapf(err, "error changing to just-became-root directory %q", root) + } + return true, nil + } + return false, nil +} + +func chrMode(mode os.FileMode) uint32 { + return uint32(unix.S_IFCHR | mode) +} + +func blkMode(mode os.FileMode) uint32 { + return uint32(unix.S_IFBLK | mode) +} + +func mkdev(major, minor uint32) uint64 { + return unix.Mkdev(major, minor) +} + +func mkfifo(path string, mode uint32) error { + return unix.Mkfifo(path, mode) +} + +func mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func chmod(path string, mode os.FileMode) error { + return os.Chmod(path, mode) +} + +func chown(path string, uid, gid int) error { + return os.Chown(path, uid, gid) +} + +func lchown(path string, uid, gid int) error { + return os.Lchown(path, uid, gid) +} + +func lutimes(isSymlink bool, path string, atime, mtime time.Time) error { + if atime.IsZero() || mtime.IsZero() { + now := time.Now() + if atime.IsZero() { + atime = now + } + if mtime.IsZero() { + mtime = now + } + } + return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())}) +} + +// sameDevice returns true unless we're sure that they're not on the same device +func sameDevice(a, b os.FileInfo) bool { + aSys := a.Sys() + bSys := b.Sys() + if aSys == nil || bSys == nil { + return true + } + au, aok := aSys.(*syscall.Stat_t) + bu, bok := bSys.(*syscall.Stat_t) + if !aok || !bok { + return true + } + return au.Dev == bu.Dev +} + +const ( + testModeMask = int64(os.ModePerm) + testIgnoreSymlinkDates = false +) diff --git 
a/vendor/github.com/containers/buildah/copier/syscall_windows.go b/vendor/github.com/containers/buildah/copier/syscall_windows.go new file mode 100644 index 00000000000..3a88d2d3eb1 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/syscall_windows.go @@ -0,0 +1,88 @@ +// +build windows + +package copier + +import ( + "errors" + "os" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +var canChroot = false + +func chroot(path string) (bool, error) { + return false, nil +} + +func chrMode(mode os.FileMode) uint32 { + return windows.S_IFCHR | uint32(mode) +} + +func blkMode(mode os.FileMode) uint32 { + return windows.S_IFBLK | uint32(mode) +} + +func mkdev(major, minor uint32) uint64 { + return 0 +} + +func mkfifo(path string, mode uint32) error { + return syscall.ENOSYS +} + +func mknod(path string, mode uint32, dev int) error { + return syscall.ENOSYS +} + +func chmod(path string, mode os.FileMode) error { + err := os.Chmod(path, mode) + if err != nil && errors.Is(err, syscall.EWINDOWS) { + return nil + } + return err +} + +func chown(path string, uid, gid int) error { + err := os.Chown(path, uid, gid) + if err != nil && errors.Is(err, syscall.EWINDOWS) { + return nil + } + return err +} + +func lchown(path string, uid, gid int) error { + err := os.Lchown(path, uid, gid) + if err != nil && errors.Is(err, syscall.EWINDOWS) { + return nil + } + return err +} + +func lutimes(isSymlink bool, path string, atime, mtime time.Time) error { + if isSymlink { + return nil + } + if atime.IsZero() || mtime.IsZero() { + now := time.Now() + if atime.IsZero() { + atime = now + } + if mtime.IsZero() { + mtime = now + } + } + return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())}) +} + +// sameDevice returns true since we can't be sure that they're not on the same device +func sameDevice(a, b os.FileInfo) bool { + return true +} + +const ( + testModeMask = int64(0600) + testIgnoreSymlinkDates = true +) diff --git a/vendor/github.com/containers/buildah/copier/unwrap_112.go b/vendor/github.com/containers/buildah/copier/unwrap_112.go new file mode 100644 index 00000000000..ebbad08b991 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/unwrap_112.go @@ -0,0 +1,11 @@ +// +build !go113 + +package copier + +import ( + "github.com/pkg/errors" +) + +func unwrapError(err error) error { + return errors.Cause(err) +} diff --git a/vendor/github.com/containers/buildah/copier/unwrap_113.go b/vendor/github.com/containers/buildah/copier/unwrap_113.go new file mode 100644 index 00000000000..cd0d0fb6871 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/unwrap_113.go @@ -0,0 +1,18 @@ +// +build go113 + +package copier + +import ( + stderror "errors" + + "github.com/pkg/errors" +) + +func unwrapError(err error) error { + e := errors.Cause(err) + for e != nil { + err = e + e = errors.Unwrap(err) + } + return err +} diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go new file mode 100644 index 00000000000..c757adcc887 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/xattrs.go @@ -0,0 +1,97 @@ +// +build linux netbsd freebsd darwin + +package copier + +import ( + "path/filepath" + "strings" + "syscall" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +const ( + xattrsSupported = true +) + +var ( + relevantAttributes = []string{"security.capability", "security.ima", 
"user.*"} // the attributes that we preserve - we discard others +) + +// isRelevantXattr checks if "attribute" matches one of the attribute patterns +// listed in the "relevantAttributes" list. +func isRelevantXattr(attribute string) bool { + for _, relevant := range relevantAttributes { + matched, err := filepath.Match(relevant, attribute) + if err != nil || !matched { + continue + } + return true + } + return false +} + +// Lgetxattrs returns a map of the relevant extended attributes set on the given file. +func Lgetxattrs(path string) (map[string]string, error) { + maxSize := 64 * 1024 * 1024 + listSize := 64 * 1024 + var list []byte + for listSize < maxSize { + list = make([]byte, listSize) + size, err := unix.Llistxattr(path, list) + if err != nil { + if unwrapError(err) == syscall.ERANGE { + listSize *= 2 + continue + } + if (unwrapError(err) == syscall.ENOTSUP) || (unwrapError(err) == syscall.ENOSYS) { + // treat these errors listing xattrs as equivalent to "no xattrs" + list = list[:0] + break + } + return nil, errors.Wrapf(err, "error listing extended attributes of %q", path) + } + list = list[:size] + break + } + if listSize >= maxSize { + return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path) + } + m := make(map[string]string) + for _, attribute := range strings.Split(string(list), string('\000')) { + if isRelevantXattr(attribute) { + attributeSize := 64 * 1024 + var attributeValue []byte + for attributeSize < maxSize { + attributeValue = make([]byte, attributeSize) + size, err := unix.Lgetxattr(path, attribute, attributeValue) + if err != nil { + if unwrapError(err) == syscall.ERANGE { + attributeSize *= 2 + continue + } + return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path) + } + m[attribute] = string(attributeValue[:size]) + break + } + if attributeSize >= maxSize { + return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path) + } + } + } + return m, nil +} + +// Lsetxattrs sets the relevant members of the specified extended attributes on the given file. 
+func Lsetxattrs(path string, xattrs map[string]string) error { + for attribute, value := range xattrs { + if isRelevantXattr(attribute) { + if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil { + return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go new file mode 100644 index 00000000000..750d842f8f3 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/xattrs_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!netbsd,!freebsd,!darwin + +package copier + +const ( + xattrsSupported = false +) + +func Lgetxattrs(path string) (map[string]string, error) { + return nil, nil +} + +func Lsetxattrs(path string, xattrs map[string]string) error { + return nil +} diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go new file mode 100644 index 00000000000..23c0ba0a2e6 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/build.go @@ -0,0 +1,254 @@ +package define + +import ( + "io" + "time" + + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage/pkg/archive" + "golang.org/x/sync/semaphore" +) + +// CommonBuildOptions are resources that can be defined by flags for both buildah from and build +type CommonBuildOptions struct { + // AddHost is the list of hostnames to add to the build container's /etc/hosts. + AddHost []string + // CgroupParent is the path to cgroups under which the cgroup for the container will be created. + CgroupParent string + // CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period + CPUPeriod uint64 + // CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota + CPUQuota int64 + // CPUShares (relative weight) + CPUShares uint64 + // CPUSetCPUs in which to allow execution (0-3, 0,1) + CPUSetCPUs string + // CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + CPUSetMems string + // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container. + HTTPProxy bool + // Memory is the upper limit (in bytes) on how much memory running containers can use. + Memory int64 + // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf + DNSSearch []string + // DNSServers is the list of DNS servers to add to the build container's /etc/resolv.conf + DNSServers []string + // DNSOptions is the list of DNS options to add to the build container's /etc/resolv.conf + DNSOptions []string + // MemorySwap limits the amount of memory and swap together. + MemorySwap int64 + // LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable". + // Recognized field names are "role", "type", and "level". + LabelOpts []string + // OmitTimestamp forces epoch 0 as created timestamp to allow for + // deterministic, content-addressable builds. + OmitTimestamp bool + // SeccompProfilePath is the pathname of a seccomp profile. + SeccompProfilePath string + // ApparmorProfile is the name of an apparmor profile. + ApparmorProfile string + // ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory. 
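+ // (Illustrative value, not a documented default: "64m" would request a 64MiB shmfs.)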
+ ShmSize string + // Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit]. + // These types are recognized: + // "core": maximum core dump size (ulimit -c) + // "cpu": maximum CPU time (ulimit -t) + // "data": maximum size of a process's data segment (ulimit -d) + // "fsize": maximum size of new files (ulimit -f) + // "locks": maximum number of file locks (ulimit -x) + // "memlock": maximum amount of locked memory (ulimit -l) + // "msgqueue": maximum amount of data in message queues (ulimit -q) + // "nice": niceness adjustment (nice -n, ulimit -e) + // "nofile": maximum number of open files (ulimit -n) + // "nproc": maximum number of processes (ulimit -u) + // "rss": maximum size of a process's resident set (ulimit -m) + // "rtprio": maximum real-time scheduling priority (ulimit -r) + // "rttime": maximum amount of real-time execution between blocking syscalls + // "sigpending": maximum number of pending signals (ulimit -i) + // "stack": maximum stack size (ulimit -s) + Ulimit []string + // Volumes to bind mount into the container + Volumes []string + // Secrets are the available secrets to use in a build. Each item in the + // slice takes the form "id=foo,src=bar", where both "id" and "src" are + // required, in that order, and "bar" is the name of a file. + Secrets []string + // SSHSources is the available ssh agent connections to forward in the build + SSHSources []string +} + +// BuildOptions can be used to alter how an image is built. +type BuildOptions struct { + // ContainerSuffix is the name to suffix containers with + ContainerSuffix string + // ContextDirectory is the default source location for COPY and ADD + // commands. + ContextDirectory string + // PullPolicy controls whether or not we pull images. It should be one + // of PullIfMissing, PullAlways, PullIfNewer, or PullNever. + PullPolicy PullPolicy + // Registry is a value which is prepended to the image's name, if it + // needs to be pulled and the image name alone cannot be resolved to a + // reference to a source image. No separator is implicitly added. + Registry string + // IgnoreUnrecognizedInstructions tells us to just log instructions we + // don't recognize, and try to keep going. + IgnoreUnrecognizedInstructions bool + // Manifest is the name to which the image will be added. + Manifest string + // Quiet tells us whether or not to announce steps as we go through them. + Quiet bool + // Isolation controls how Run() runs things. + Isolation Isolation + // Runtime is the name of the command to run for RUN instructions when + // Isolation is either IsolationDefault or IsolationOCI. It should + // accept the same arguments and flags that runc does. + Runtime string + // RuntimeArgs adds global arguments for the runtime. + RuntimeArgs []string + // TransientMounts is a list of mounts that won't be kept in the image. + TransientMounts []string + // Compression specifies the type of compression which is applied to + // layer blobs. The default is to not use compression, but + // archive.Gzip is recommended. + Compression archive.Compression + // Arguments which can be interpolated into Dockerfiles + Args map[string]string + // Name of the image to write to. + Output string + // Additional tags to add to the image that we write, if we know of a + // way to add them. + AdditionalTags []string + // Log is a callback that will print a progress message. If no value + // is supplied, the message will be sent to Err (or os.Stderr, if Err + // is nil) by default. 
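+ // (For instance, a caller could pass logrus.Debugf here to route progress messages + // into its own logger; that is an illustration, not a requirement.)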
+ Log func(format string, args ...interface{}) + // In is connected to stdin for RUN instructions. + In io.Reader + // Out is a place where non-error log messages are sent. + Out io.Writer + // Err is a place where error log messages should be sent. + Err io.Writer + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to report the + // progress of the (possible) pulling of the source image and the + // writing of the new image. + ReportWriter io.Writer + // OutputFormat is the format of the output image's manifest and + // configuration data. + // Accepted values are buildah.OCIv1ImageManifest and buildah.Dockerv2ImageManifest. + OutputFormat string + // SystemContext holds parameters used for authentication. + SystemContext *types.SystemContext + // NamespaceOptions controls how we set up namespaces for processes that we + // might need when handling RUN instructions. + NamespaceOptions []NamespaceOption + // ConfigureNetwork controls whether or not network interfaces and + // routing are configured for a new network namespace (i.e., when not + // joining another's namespace and not just using the host's + // namespace), effectively deciding whether or not the process has a + // usable network. + ConfigureNetwork NetworkConfigurationPolicy + // CNIPluginPath is the location of CNI plugin helpers, if they should be + // run from a location other than the default location. + CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. + CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to set up CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + + // ID mapping options to use if we're setting up our own user namespace + // when handling RUN instructions. + IDMappingOptions *IDMappingOptions + // AddCapabilities is a list of capabilities to add to the default set when + // handling RUN instructions. + AddCapabilities []string + // DropCapabilities is a list of capabilities to remove from the default set + // when handling RUN instructions. If a capability appears in both lists, it + // will be dropped. + DropCapabilities []string + // CommonBuildOpts is *required*. + CommonBuildOpts *CommonBuildOptions + // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format + DefaultMountsFilePath string + // IIDFile tells the builder to write the image ID to the specified file + IIDFile string + // Squash tells the builder to produce an image with a single layer + // instead of with possibly more than one layer. + Squash bool + // Labels metadata for an image + Labels []string + // Annotations metadata for an image + Annotations []string + // OnBuild commands to be run by images based on this image + OnBuild []string + // Layers tells the builder to create a cache of images for each step in the Dockerfile + Layers bool + // NoCache tells the builder to build the image from scratch without checking for a cache. + // It creates a new set of cached images for the build. 
+ NoCache bool + // RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used + // during the build process. Default is true. + RemoveIntermediateCtrs bool + // ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if + // the build was unsuccessful. + ForceRmIntermediateCtrs bool + // BlobDirectory is a directory which we'll use for caching layer blobs. + BlobDirectory string + // Target is the targeted FROM in the Dockerfile to build. + Target string + // Devices are the additional devices to add to the containers. + Devices []string + // SignBy is the fingerprint of a GPG key to use for signing images. + SignBy string + // Architecture specifies the target architecture of the image to be built. + Architecture string + // Timestamp sets the created timestamp to the specified time, allowing + // for deterministic, content-addressable builds. + Timestamp *time.Time + // OS specifies the operating system of the image to be built. + OS string + // MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one + // image from or to an external registry if the first attempt fails. + MaxPullPushRetries int + // PullPushRetryDelay is how long to wait before retrying a pull or push attempt. + PullPushRetryDelay time.Duration + // OciDecryptConfig contains the config that can be used to decrypt an image if it is + // encrypted, if non-nil. If nil, it does not attempt to decrypt an image. + OciDecryptConfig *encconfig.DecryptConfig + // Jobs is the number of stages to run in parallel. If not specified it defaults to 1. + // Ignored if a JobSemaphore is provided. + Jobs *int + // JobSemaphore, for when you want Jobs to be shared with more than just this build. + JobSemaphore *semaphore.Weighted + // LogRusage logs resource usage for each step. + LogRusage bool + // RusageLogFile is the file to which the Rusage logs will be saved instead of stdout + RusageLogFile string + // Excludes is a list of excludes to be used instead of the .dockerignore file. + Excludes []string + // IgnoreFile is the name of the .containerignore file + IgnoreFile string + // From is the image name to use to replace the value specified in the first + // FROM instruction in the Containerfile + From string + // Platforms is the list of parsed OS/Arch/Variant triples that we want + // to build the image for. If this slice has items in it, the OS and + // Architecture fields above are ignored. + Platforms []struct{ OS, Arch, Variant string } + // AllPlatforms tells the builder to set the list of target platforms + // to match the set of platforms for which all of the build's base + // images are available. If this field is set, Platforms is ignored. + AllPlatforms bool + // UnsetEnvs is a list of environment variables not to add to the final image. + UnsetEnvs []string +} diff --git a/vendor/github.com/containers/buildah/define/isolation.go b/vendor/github.com/containers/buildah/define/isolation.go new file mode 100644 index 00000000000..53bea85fdb0 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/isolation.go @@ -0,0 +1,32 @@ +package define + +import ( + "fmt" +) + +type Isolation int + +const ( + // IsolationDefault is whatever we think will work best. + IsolationDefault Isolation = iota + // IsolationOCI is a proper OCI runtime. + IsolationOCI + // IsolationChroot is a more chroot-like environment: less isolation, + // but with fewer requirements. + IsolationChroot + // IsolationOCIRootless is a proper OCI runtime in rootless mode. 
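+ // (That is, the runtime is launched inside an unprivileged user's user namespace + // rather than with host root privileges.)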
+ IsolationOCIRootless +) + +// String converts an Isolation into a string. +func (i Isolation) String() string { + switch i { + case IsolationDefault, IsolationOCI: + return "oci" + case IsolationChroot: + return "chroot" + case IsolationOCIRootless: + return "rootless" + } + return fmt.Sprintf("unrecognized isolation type %d", i) +} diff --git a/vendor/github.com/containers/buildah/define/namespace.go b/vendor/github.com/containers/buildah/define/namespace.go new file mode 100644 index 00000000000..d0247fe9165 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/namespace.go @@ -0,0 +1,87 @@ +package define + +import ( + "fmt" +) + +// NamespaceOption controls how we set up a namespace when launching processes. +type NamespaceOption struct { + // Name specifies the type of namespace, typically matching one of the + // ...Namespace constants defined in + // github.com/opencontainers/runtime-spec/specs-go. + Name string + // Host is used to force our processes to use the host's namespace of + // this type. + Host bool + // Path is the path of the namespace to attach our process to, if Host + // is not set. If Host is not set and Path is also empty, a new + // namespace will be created for the process that we're starting. + // If Name is specs.NetworkNamespace, if Path doesn't look like an + // absolute path, it is treated as a comma-separated list of CNI + // configuration names which will be selected from among all of the CNI + // network configurations which we find. + Path string +} + +// NamespaceOptions provides some helper methods for a slice of NamespaceOption +// structs. +type NamespaceOptions []NamespaceOption + +// Find the configuration for the namespace of the given type. If there are +// duplicates, find the _last_ one of the type, since we assume it was appended +// more recently. +func (n *NamespaceOptions) Find(namespace string) *NamespaceOption { + for i := range *n { + j := len(*n) - 1 - i + if (*n)[j].Name == namespace { + return &((*n)[j]) + } + } + return nil +} + +// AddOrReplace either adds or replaces the configuration for a given namespace. +func (n *NamespaceOptions) AddOrReplace(options ...NamespaceOption) { +nextOption: + for _, option := range options { + for i := range *n { + j := len(*n) - 1 - i + if (*n)[j].Name == option.Name { + (*n)[j] = option + continue nextOption + } + } + *n = append(*n, option) + } +} + +// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled, +// or NetworkEnabled. +type NetworkConfigurationPolicy int + +const ( + // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that the default behavior should be used. + NetworkDefault NetworkConfigurationPolicy = iota + // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that network interfaces should NOT be configured for + // newly-created network namespaces. + NetworkDisabled + // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that network interfaces should be configured for + // newly-created network namespaces. + NetworkEnabled +) + +// String formats a NetworkConfigurationPolicy as a string. 
+func (p NetworkConfigurationPolicy) String() string { + switch p { + case NetworkDefault: + return "NetworkDefault" + case NetworkDisabled: + return "NetworkDisabled" + case NetworkEnabled: + return "NetworkEnabled" + } + return fmt.Sprintf("unknown NetworkConfigurationPolicy %d", p) +} diff --git a/vendor/github.com/containers/buildah/define/pull.go b/vendor/github.com/containers/buildah/define/pull.go new file mode 100644 index 00000000000..00787bd9b64 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/pull.go @@ -0,0 +1,50 @@ +package define + +import ( + "fmt" +) + +// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever. +type PullPolicy int + +const ( + // PullIfMissing is one of the values that BuilderOptions.PullPolicy + // can take, signalling that the source image should be pulled from a + // registry if a local copy of it is not already present. + PullIfMissing PullPolicy = iota + // PullAlways is one of the values that BuilderOptions.PullPolicy can + // take, signalling that a fresh, possibly updated, copy of the image + // should be pulled from a registry before the build proceeds. + PullAlways + // PullIfNewer is one of the values that BuilderOptions.PullPolicy + // can take, signalling that the source image should only be pulled + // from a registry if a local copy is not already present or if a + // newer version of the image is present on the repository. + PullIfNewer + // PullNever is one of the values that BuilderOptions.PullPolicy can + // take, signalling that the source image should not be pulled from a + // registry. + PullNever +) + +// String converts a PullPolicy into a string. +func (p PullPolicy) String() string { + switch p { + case PullIfMissing: + return "missing" + case PullAlways: + return "always" + case PullIfNewer: + return "ifnewer" + case PullNever: + return "never" + } + return fmt.Sprintf("unrecognized policy %d", p) +} + +var PolicyMap = map[string]PullPolicy{ + "missing": PullIfMissing, + "always": PullAlways, + "never": PullNever, + "ifnewer": PullIfNewer, +} diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go new file mode 100644 index 00000000000..88cf7e04d09 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/types.go @@ -0,0 +1,224 @@ +package define + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net/http" + urlpkg "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/containers/image/v5/manifest" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/ioutils" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // Package is the name of this package, used in help output and to + // identify working containers. + Package = "buildah" + // Version for the Package. Bump version in contrib/rpm/buildah.spec + // too. + Version = "1.24.5" + + // DefaultRuntime if containers.conf fails. + DefaultRuntime = "runc" + + // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, + // suitable for specifying as a value of the PreferredManifestType + // member of a CommitOptions structure. It is also the default. 
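+ // (Concretely, the MIME type "application/vnd.oci.image.manifest.v1+json".)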
+ OCIv1ImageManifest = v1.MediaTypeImageManifest + // Dockerv2ImageManifest is the MIME type of a Docker v2s2 image + // manifest, suitable for specifying as a value of the + // PreferredManifestType member of a CommitOptions structure. + Dockerv2ImageManifest = manifest.DockerV2Schema2MediaType + + // OCI used to define the "oci" image format + OCI = "oci" + // DOCKER used to define the "docker" image format + DOCKER = "docker" +) + +var ( + // DefaultCapabilities is the list of capabilities which we grant by + // default to containers which are running under UID 0. + DefaultCapabilities = []string{ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT", + } + // DefaultNetworkSysctl is the list of kernel parameters which we + // grant by default to containers which are running under UID 0. + DefaultNetworkSysctl = map[string]string{ + "net.ipv4.ping_group_range": "0 0", + } + + Gzip = archive.Gzip + Bzip2 = archive.Bzip2 + Xz = archive.Xz + Zstd = archive.Zstd + Uncompressed = archive.Uncompressed +) + +// IDMappingOptions controls how we set up UID/GID mapping when we set up a +// user namespace. +type IDMappingOptions struct { + HostUIDMapping bool + HostGIDMapping bool + UIDMap []specs.LinuxIDMapping + GIDMap []specs.LinuxIDMapping +} + +// Secret is a secret source that can be used in a RUN +type Secret struct { + ID string + Source string + SourceType string +} + +// TempDirForURL checks if the passed-in string looks like a URL or -. If it is, +// TempDirForURL creates a temporary directory, arranges for its contents to be +// the contents of that URL, and returns the temporary directory's path, along +// with the name of a subdirectory which should be used as the build context +// (which may be empty or "."). Removal of the temporary directory is the +// responsibility of the caller. If the string doesn't look like a URL, +// TempDirForURL returns empty strings and a nil error. 
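+// For example, given "github.com/containers/buildah" it fetches the repository's +// master tarball and returns the temporary directory along with the subdirectory +// name "buildah-master", per the handling below.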
+func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) { + if !strings.HasPrefix(url, "http://") && + !strings.HasPrefix(url, "https://") && + !strings.HasPrefix(url, "git://") && + !strings.HasPrefix(url, "github.com/") && + url != "-" { + return "", "", nil + } + name, err = ioutil.TempDir(dir, prefix) + if err != nil { + return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url) + } + urlParsed, err := urlpkg.Parse(url) + if err != nil { + return "", "", errors.Wrapf(err, "error parsing url %q", url) + } + if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") { + combinedOutput, err := cloneToDirectory(url, name) + if err != nil { + if err2 := os.RemoveAll(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput)) + } + return name, "", nil + } + if strings.HasPrefix(url, "github.com/") { + ghurl := url + url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl) + logrus.Debugf("resolving url %q to %q", ghurl, url) + subdir = path.Base(ghurl) + "-master" + } + if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { + err = downloadToDirectory(url, name) + if err != nil { + if err2 := os.RemoveAll(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", subdir, err + } + return name, subdir, nil + } + if url == "-" { + err = stdinToDirectory(name) + if err != nil { + if err2 := os.RemoveAll(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", subdir, err + } + logrus.Debugf("Build context is at %q", name) + return name, subdir, nil + } + logrus.Debugf("don't know how to retrieve %q", url) + if err2 := os.Remove(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", "", errors.Errorf("unreachable code reached") +} + +func cloneToDirectory(url, dir string) ([]byte, error) { + gitBranch := strings.Split(url, "#") + var cmd *exec.Cmd + if len(gitBranch) < 2 { + logrus.Debugf("cloning %q to %q", url, dir) + cmd = exec.Command("git", "clone", url, dir) + } else { + logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) + cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir) + } + return cmd.CombinedOutput() +} + +func downloadToDirectory(url, dir string) error { + logrus.Debugf("extracting %q to %q", url, dir) + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.ContentLength == 0 { + return errors.Errorf("no contents in %q", url) + } + if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil { + resp1, err := http.Get(url) + if err != nil { + return err + } + defer resp1.Body.Close() + body, err := ioutil.ReadAll(resp1.Body) + if err != nil { + return err + } + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile + if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil { + return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile) + } + } + return nil +} + +func stdinToDirectory(dir string) error { + logrus.Debugf("extracting stdin to %q", dir) + r := bufio.NewReader(os.Stdin) + b, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "Failed to read from stdin") + } + reader := 
bytes.NewReader(b) + if err := chrootarchive.Untar(reader, dir, nil); err != nil { + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile + if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil { + return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/define/types_unix.go b/vendor/github.com/containers/buildah/define/types_unix.go new file mode 100644 index 00000000000..aedadad368f --- /dev/null +++ b/vendor/github.com/containers/buildah/define/types_unix.go @@ -0,0 +1,9 @@ +// +build darwin linux + +package define + +import ( + "github.com/opencontainers/runc/libcontainer/devices" +) + +type ContainerDevices = []devices.Device diff --git a/vendor/github.com/containers/buildah/define/types_unsupported.go b/vendor/github.com/containers/buildah/define/types_unsupported.go new file mode 100644 index 00000000000..64e26d377e8 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/types_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux,!darwin + +package define + +// ContainerDevices is currently not implemented. +type ContainerDevices = []struct{} diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go new file mode 100644 index 00000000000..e3bddba207d --- /dev/null +++ b/vendor/github.com/containers/buildah/delete.go @@ -0,0 +1,17 @@ +package buildah + +import ( + "github.com/pkg/errors" +) + +// Delete removes the working container. The buildah.Builder object should not +// be used after this method is called. +func (b *Builder) Delete() error { + if err := b.store.DeleteContainer(b.ContainerID); err != nil { + return errors.Wrapf(err, "error deleting build container %q", b.ContainerID) + } + b.MountPoint = "" + b.Container = "" + b.ContainerID = "" + return nil +} diff --git a/vendor/github.com/containers/buildah/developmentplan.md b/vendor/github.com/containers/buildah/developmentplan.md new file mode 100644 index 00000000000..7d82c2e6d9b --- /dev/null +++ b/vendor/github.com/containers/buildah/developmentplan.md @@ -0,0 +1,13 @@ +![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png) + +# Development Plan + +## Development goals for Buildah + + * Integration into Kubernetes and potentially other tools. The biggest requirement for this is to be able to run Buildah within a standard Linux container without SYS_ADMIN privileges. This would allow Buildah to run non-privileged containers inside of Kubernetes, so you could distribute your container workloads. + + * Integration with user namespaces. Podman has this already, and the goal is to get `buildah build` and `buildah run` to run their containers in a user namespace to give the builder better security isolation from the host. + + * The `buildah build` command's goal is feature parity with other OCI image and container build systems. + + * Addressing issues from the community as reported in the [Issues](https://github.com/containers/buildah/issues) page.
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go new file mode 100644 index 00000000000..870ab8d9845 --- /dev/null +++ b/vendor/github.com/containers/buildah/digester.go @@ -0,0 +1,269 @@ +package buildah + +import ( + "archive/tar" + "fmt" + "hash" + "io" + "sync" + "time" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type digester interface { + io.WriteCloser + ContentType() string + Digest() digest.Digest +} + +// A simple digester just digests its content as-is. +type simpleDigester struct { + digester digest.Digester + hasher hash.Hash + contentType string +} + +func newSimpleDigester(contentType string) digester { + finalDigester := digest.Canonical.Digester() + return &simpleDigester{ + digester: finalDigester, + hasher: finalDigester.Hash(), + contentType: contentType, + } +} + +func (s *simpleDigester) ContentType() string { + return s.contentType +} + +func (s *simpleDigester) Write(p []byte) (int, error) { + return s.hasher.Write(p) +} + +func (s *simpleDigester) Close() error { + return nil +} + +func (s *simpleDigester) Digest() digest.Digest { + return s.digester.Digest() +} + +// A tarFilterer passes a tarball through to an io.WriteCloser, potentially +// modifying headers as it goes. +type tarFilterer struct { + wg sync.WaitGroup + pipeWriter *io.PipeWriter + closedLock sync.Mutex + closed bool + err error +} + +func (t *tarFilterer) Write(p []byte) (int, error) { + return t.pipeWriter.Write(p) +} + +func (t *tarFilterer) Close() error { + t.closedLock.Lock() + if t.closed { + t.closedLock.Unlock() + return errors.Errorf("tar filter is already closed") + } + t.closed = true + t.closedLock.Unlock() + err := t.pipeWriter.Close() + t.wg.Wait() + if err != nil { + return errors.Wrapf(err, "error closing filter pipe") + } + return t.err +} + +// newTarFilterer passes one or more tar archives through to an io.WriteCloser +// as a single archive, potentially calling filter to modify headers and +// contents as it goes. +// +// Note: if "filter" indicates that a given item should be skipped, there is no +// guarantee that there will not be a subsequent item of type TypeLink, which +// is a hard link, which points to the skipped item as the link target. 
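+//
+// For illustration only (an assumed caller, not upstream code; dest is an
+// assumed io.WriteCloser): a filter that clears timestamps without skipping
+// entries or replacing their contents, in the spirit of
+// modifyTarHeaderForDigesting below, could be passed as:
+//
+//	wc := newTarFilterer(dest, func(hdr *tar.Header) (bool, bool, io.Reader) {
+//		hdr.ModTime = time.Time{}
+//		return false, false, nil
+//	})
+//	// write one or more tar streams into wc, then call wc.Close()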
+func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser { + pipeReader, pipeWriter := io.Pipe() + tarWriter := tar.NewWriter(writeCloser) + filterer := &tarFilterer{ + pipeWriter: pipeWriter, + } + filterer.wg.Add(1) + go func() { + filterer.closedLock.Lock() + closed := filterer.closed + filterer.closedLock.Unlock() + for !closed { + tarReader := tar.NewReader(pipeReader) + hdr, err := tarReader.Next() + for err == nil { + var skip, replaceContents bool + var replacementContents io.Reader + if filter != nil { + skip, replaceContents, replacementContents = filter(hdr) + } + if !skip { + err = tarWriter.WriteHeader(hdr) + if err != nil { + err = errors.Wrapf(err, "error filtering tar header for %q", hdr.Name) + break + } + if hdr.Size != 0 { + var n int64 + var copyErr error + if replaceContents { + n, copyErr = io.CopyN(tarWriter, replacementContents, hdr.Size) + } else { + n, copyErr = io.Copy(tarWriter, tarReader) + } + if copyErr != nil { + err = errors.Wrapf(copyErr, "error copying content for %q", hdr.Name) + break + } + if n != hdr.Size { + err = errors.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n) + break + } + } + } + hdr, err = tarReader.Next() + } + if err != io.EOF { + filterer.err = errors.Wrapf(err, "error reading tar archive") + break + } + filterer.closedLock.Lock() + closed = filterer.closed + filterer.closedLock.Unlock() + } + pipeReader.Close() + tarWriter.Close() + writeCloser.Close() + filterer.wg.Done() + }() + return filterer +} + +// A tar digester digests an archive, modifying the headers it digests by +// calling a specified function to potentially modify the header that it's +// about to write. +type tarDigester struct { + isOpen bool + nested digester + tarFilterer io.WriteCloser +} + +func modifyTarHeaderForDigesting(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) { + zeroTime := time.Time{} + hdr.ModTime = zeroTime + hdr.AccessTime = zeroTime + hdr.ChangeTime = zeroTime + return false, false, nil +} + +func newTarDigester(contentType string) digester { + nested := newSimpleDigester(contentType) + digester := &tarDigester{ + isOpen: true, + nested: nested, + tarFilterer: newTarFilterer(nested, modifyTarHeaderForDigesting), + } + return digester +} + +func (t *tarDigester) ContentType() string { + return t.nested.ContentType() +} + +func (t *tarDigester) Digest() digest.Digest { + return t.nested.Digest() +} + +func (t *tarDigester) Write(p []byte) (int, error) { + return t.tarFilterer.Write(p) +} + +func (t *tarDigester) Close() error { + if t.isOpen { + t.isOpen = false + return t.tarFilterer.Close() + } + return nil +} + +// CompositeDigester can compute a digest over multiple items. +type CompositeDigester struct { + digesters []digester + closer io.Closer +} + +// closeOpenDigester closes an open sub-digester, if we have one. +func (c *CompositeDigester) closeOpenDigester() { + if c.closer != nil { + c.closer.Close() + c.closer = nil + } +} + +// Restart clears all state, so that the composite digester can start over. +func (c *CompositeDigester) Restart() { + c.closeOpenDigester() + c.digesters = nil +} + +// Start starts recording the digest for a new item ("", "file", or "dir"). +// The caller should call Hash() immediately after to retrieve the new +// io.WriteCloser. 
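+//
+// A typical sequence (an illustrative sketch; fileTar is an assumed
+// tar-format stream, since "file" and "dir" items are digested as tar):
+//
+//	var cd CompositeDigester
+//	cd.Start("file")
+//	_, _ = io.Copy(cd.Hash(), fileTar)
+//	contentType, dgst := cd.Digest() // Digest closes the open sub-digester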
+func (c *CompositeDigester) Start(contentType string) { + c.closeOpenDigester() + switch contentType { + case "": + c.digesters = append(c.digesters, newSimpleDigester("")) + case "file", "dir": + digester := newTarDigester(contentType) + c.closer = digester + c.digesters = append(c.digesters, digester) + default: + panic(fmt.Sprintf(`unrecognized content type: expected "", "file", or "dir", got %q`, contentType)) + } +} + +// Hash returns the hasher for the current item. +func (c *CompositeDigester) Hash() io.WriteCloser { + num := len(c.digesters) + if num == 0 { + return nil + } + return c.digesters[num-1] +} + +// Digest returns the content type and a composite digest over everything +// that's been digested. +func (c *CompositeDigester) Digest() (string, digest.Digest) { + c.closeOpenDigester() + num := len(c.digesters) + switch num { + case 0: + return "", "" + case 1: + return c.digesters[0].ContentType(), c.digesters[0].Digest() + default: + content := "" + for i, digester := range c.digesters { + if i > 0 { + content += "," + } + contentType := digester.ContentType() + if contentType != "" { + contentType += ":" + } + content += contentType + digester.Digest().Encoded() + } + return "multi", digest.Canonical.FromString(content) + } +} diff --git a/vendor/github.com/containers/buildah/docker/AUTHORS b/vendor/github.com/containers/buildah/docker/AUTHORS new file mode 100644 index 00000000000..b2cd9ecbe6f --- /dev/null +++ b/vendor/github.com/containers/buildah/docker/AUTHORS @@ -0,0 +1,1788 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anran Qiao +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill W +bin liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boshi Lian +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Gavin +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elena Morozova +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Erica Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. 
Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +Gaël PORTAY +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +He Simei +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. 
Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Janonymous +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jie Luo +Jilles Oldenbeuving +Jim Alateras +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiuyue Ma +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John Warwick +John Willis +johnharris85 +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jorge Marin +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Bodah +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +jroenf +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin P. 
Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +Mansi Nahar +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Parker +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +Muayyad Alsadi +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +Ovidio Mallo +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. 
Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +pestophagous +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qhuang +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean McIntyre +Sean OMeara +Sean P. 
Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Wilkie +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wayne Chang +Wei Wu +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinbo Weng +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yanqiang Miao +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +You-Sheng Yang (楊有勝) +Youcef YEKHLEF +Yu Peng +Yuan Sun +yuchangchun +yuchengxia +Yunxiang Huang +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenkun Bi +zhouhao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go new file mode 100644 index 00000000000..b0ed2e4c021 --- /dev/null +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -0,0 +1,258 @@ +package docker + +// +// Types extracted from Docker +// + +import ( + "time" + + "github.com/containers/image/v5/pkg/strslice" + digest "github.com/opencontainers/go-digest" +) + +// github.com/moby/moby/image/rootfs.go +const TypeLayers = "layers" + +// github.com/docker/distribution/manifest/schema2/manifest.go +const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" + +// github.com/moby/moby/image/rootfs.go +// V2S2RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. 
+type V2S2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// github.com/moby/moby/image/image.go +// V2S2History stores build commands that were used to create an image +type V2S2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// github.com/moby/moby/image/image.go +// ID is the content-addressable ID of an image. +type ID digest.Digest + +// github.com/moby/moby/api/types/container/config.go +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // Time to wait after the container starts before running the first check. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// github.com/docker/go-connections/nat/nat.go +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// github.com/docker/go-connections/nat/nat.go +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// github.com/moby/moby/api/types/container/config.go +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also supports user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that was defined on the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// github.com/docker/distribution/manifest/schema1/config_builder.go +// For non-top-level layers, create fake V1Compatibility strings that +// fit the format and don't collide with anything else, but don't +// result in runnable images on their own. +type V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// github.com/moby/moby/image/image.go +// V1Image stores the V1 image configuration.
+type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // Variant is a variant of the CPU that the image is built and runs on + Variant string `json:"variant,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// github.com/moby/moby/image/image.go +// V2Image stores the image configuration +type V2Image struct { + V1Image + Parent ID `json:"parent,omitempty"` // nolint:govet + RootFS *V2S2RootFS `json:"rootfs,omitempty"` + History []V2S2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// github.com/docker/distribution/manifest/versioned.go +// V2Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type V2Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} + +// github.com/docker/distribution/manifest/schema1/manifest.go +// V2S1FSLayer is a container struct for BlobSums defined in an image manifest +type V2S1FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// github.com/docker/distribution/manifest/schema1/manifest.go +// V2S1History stores unstructured v1 compatibility information +type V2S1History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// github.com/docker/distribution/manifest/schema1/manifest.go +// V2S1Manifest provides the base accessible fields for working with V2 image +// format in the registry.
+type V2S1Manifest struct { + V2Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []V2S1FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []V2S1History `json:"history"` +} + +// github.com/docker/distribution/blobs.go +// V2S2Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type V2S2Descriptor struct { + // MediaType describes the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Many of the type relationships + // depend on the simplicity of this type. +} + +// github.com/docker/distribution/manifest/schema2/manifest.go +// V2S2Manifest defines a schema2 manifest. +type V2S2Manifest struct { + V2Versioned + + // Config references the image configuration as a blob. + Config V2S2Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration.
+ Layers []V2S2Descriptor `json:"layers"` +} diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod new file mode 100644 index 00000000000..0637f77d6e4 --- /dev/null +++ b/vendor/github.com/containers/buildah/go.mod @@ -0,0 +1,48 @@ +module github.com/containers/buildah + +go 1.13 + +require ( + github.com/containerd/containerd v1.5.9 + github.com/containernetworking/cni v1.0.1 + github.com/containers/common v0.47.5 + github.com/containers/image/v5 v5.19.3 + github.com/containers/ocicrypt v1.1.4 + github.com/containers/storage v1.38.5 + github.com/docker/distribution v2.8.0+incompatible + github.com/docker/docker v20.10.12+incompatible + github.com/docker/go-units v0.4.0 + github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 + github.com/fsouza/go-dockerclient v1.7.8 + github.com/ghodss/yaml v1.0.0 + github.com/hashicorp/go-multierror v1.1.1 + github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect + github.com/mattn/go-shellwords v1.0.12 + github.com/onsi/ginkgo v1.16.5 + github.com/onsi/gomega v1.18.1 + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 + github.com/opencontainers/runc v1.1.0 + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 + github.com/opencontainers/runtime-tools v0.9.0 + github.com/opencontainers/selinux v1.10.0 + github.com/openshift/imagebuilder v1.2.2 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.11.1 // indirect + github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/cobra v1.3.0 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.7.0 + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 + go.etcd.io/bbolt v1.3.6 + golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b +) + +replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2 + +replace github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31 diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum new file mode 100644 index 00000000000..2f661b3c637 --- /dev/null +++ b/vendor/github.com/containers/buildah/go.sum @@ -0,0 +1,1613 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go 
v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/14rcole/gopopulate 
v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= 
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext 
v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc 
v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= +github.com/containerd/stargz-snapshotter/estargz v0.11.0 h1:t0IW5kOmY7AXDAWRUs2uVzDhijAUOAYVr/dyRhOQvBg= +github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo= +github.com/containernetworking/cni v1.0.1/go.mod 
h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containernetworking/plugins v1.0.1 h1:wwCfYbTCj5FC0EJgyzyjTXmqysOiJE9r712Z+2KVZAk= +github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containers/common v0.47.5 h1:Qm9o+wVPO9sbggTKubN3xYMtPRaPv7dmcrJQgongHHw= +github.com/containers/common v0.47.5/go.mod h1:HgX0mFXyB0Tbe2REEIp9x9CxET6iSzmHfwR6S/t2LZc= +github.com/containers/image/v5 v5.19.1/go.mod h1:ewoo3u+TpJvGmsz64XgzbyTHwHtM94q7mgK/pX+v2SE= +github.com/containers/image/v5 v5.19.3 h1:XljZWtaMNJOh9fE8IQyir+bw+7PhxnIwc2/kDF9GPoQ= +github.com/containers/image/v5 v5.19.3/go.mod h1:9nqGCbwNy4nrjykPRuxJL0aJG5F5CNfv4Nk4U1JezE4= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.4 h1:V0ktirShnF1iJ2ithuoYE4eNAOSL3af1PlTiykv3PLQ= +github.com/containers/ocicrypt v1.1.4/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= +github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= +github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ= +github.com/containers/storage v1.38.3/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ= +github.com/containers/storage v1.38.5 h1:gymwFRLMvFAqiGcDJ0+OxaN6ZqQkv3FQSbhN7a2JBW0= +github.com/containers/storage v1.38.5/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod 
h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8= +github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution 
v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY= +github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= +github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4= +github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y= +github.com/fsouza/go-dockerclient v1.7.8 h1:Tp7IYXyvmZsmrCDffMENOv6l2xN2Aw17EThY8Gokq48= +github.com/fsouza/go-dockerclient v1.7.8/go.mod h1:7cvopLQDrW3dJ5mcx2LzWMBfmpv/fq7MZUEPcQlAtLw= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= 
+github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= +github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= 
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44= +github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= 
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod 
h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31 h1:Wh4aR2I6JFwySre9m3iHJYuMnvUFE/HT6qAXozRWi/E= +github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 
h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU= +github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= +github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/openshift/imagebuilder v1.2.2 h1:++jWWMkTVJKP2MIjTPaTk2MqwWIOYYlDaQbZyLlLBh0= +github.com/openshift/imagebuilder v1.2.2/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= +github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= +github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= +github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 
h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo= +github.com/sylabs/sif/v2 v2.3.1 
h1:NHoc/rZpnOS05etmT+j8IJOZP2Cc8zHHG8rKSVosvZs= +github.com/sylabs/sif/v2 v2.3.1/go.mod h1:NnvveH62GiibimL00MrI6YYcZfb7DnZMcRo/40giY+0= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vbauerster/mpb/v7 v7.3.2 h1:tCuxMy8G9cLdjb61b6wO7I1vRT/LyMEzRbr3xCC0JPU= +github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= +github.com/vishvananda/netns 
v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= 
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d h1:1n1fc535VhN8SYtD4cDUyNlfpAF2ROMM9+11equK3hs= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud 
v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto 
v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= 
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go 
v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go new file mode 100644 index 00000000000..e859de183db --- /dev/null +++ b/vendor/github.com/containers/buildah/image.go @@ -0,0 +1,815 @@ +package buildah + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + "github.com/containers/buildah/docker" + "github.com/containers/image/v5/docker/reference" + 
"github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, + // suitable for specifying as a value of the PreferredManifestType + // member of a CommitOptions structure. It is also the default. + OCIv1ImageManifest = define.OCIv1ImageManifest + // Dockerv2ImageManifest is the MIME type of a Docker v2s2 image + // manifest, suitable for specifying as a value of the + // PreferredManifestType member of a CommitOptions structure. + Dockerv2ImageManifest = define.Dockerv2ImageManifest +) + +type containerImageRef struct { + fromImageName string + fromImageID string + store storage.Store + compression archive.Compression + name reference.Named + names []string + containerID string + mountLabel string + layerID string + oconfig []byte + dconfig []byte + created *time.Time + createdBy string + historyComment string + annotations map[string]string + preferredManifestType string + squash bool + emptyLayer bool + idMappingOptions *define.IDMappingOptions + parent string + blobDirectory string + preEmptyLayers []v1.History + postEmptyLayers []v1.History +} + +type blobLayerInfo struct { + ID string + Size int64 +} + +type containerImageSource struct { + path string + ref *containerImageRef + store storage.Store + containerID string + mountLabel string + layerID string + names []string + compression archive.Compression + config []byte + configDigest digest.Digest + manifest []byte + manifestType string + blobDirectory string + blobLayers map[digest.Digest]blobLayerInfo +} + +func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) { + src, err := i.NewImageSource(ctx, sc) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sc, src) +} + +func expectedOCIDiffIDs(image v1.Image) int { + expected := 0 + for _, history := range image.History { + if !history.EmptyLayer { + expected = expected + 1 + } + } + return expected +} + +func expectedDockerDiffIDs(image docker.V2Image) int { + expected := 0 + for _, history := range image.History { + if !history.EmptyLayer { + expected = expected + 1 + } + } + return expected +} + +// Compute the media types which we need to attach to a layer, given the type of +// compression that we'll be applying. +func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) { + omediaType = v1.MediaTypeImageLayer + dmediaType = docker.V2S2MediaTypeUncompressedLayer + if layerCompression != archive.Uncompressed { + switch layerCompression { + case archive.Gzip: + omediaType = v1.MediaTypeImageLayerGzip + dmediaType = manifest.DockerV2Schema2LayerMediaType + logrus.Debugf("compressing %s with gzip", what) + case archive.Bzip2: + // Until the image specs define a media type for bzip2-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with bzip2. 
+ return "", "", errors.New("media type for bzip2-compressed layers is not defined") + case archive.Xz: + // Until the image specs define a media type for xz-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with xz. + return "", "", errors.New("media type for xz-compressed layers is not defined") + case archive.Zstd: + // Until the image specs define a media type for zstd-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with zstd. + return "", "", errors.New("media type for zstd-compressed layers is not defined") + default: + logrus.Debugf("compressing %s with unknown compressor(?)", what) + } + } + return omediaType, dmediaType, nil +} + +// Extract the container's whole filesystem as if it were a single layer. +func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { + var uidMap, gidMap []idtools.IDMap + mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) + if err != nil { + return nil, nil, errors.Wrapf(err, "error mounting container %q", i.containerID) + } + pipeReader, pipeWriter := io.Pipe() + errChan := make(chan error, 1) + go func() { + defer close(errChan) + if i.idMappingOptions != nil { + uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) + } + copierOptions := copier.GetOptions{ + UIDMap: uidMap, + GIDMap: gidMap, + } + err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) + errChan <- err + pipeWriter.Close() + + }() + return ioutils.NewReadCloserWrapper(pipeReader, func() error { + if err = pipeReader.Close(); err != nil { + err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID) + } + if _, err2 := i.store.Unmount(i.containerID, false); err == nil { + if err2 != nil { + err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID) + } + err = err2 + } + return err + }), errChan, nil +} + +// Build fresh copies of the container configuration structures so that we can edit them +// without making unintended changes to the original Builder. +func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) { + created := time.Now().UTC() + if i.created != nil { + created = *i.created + } + + // Build an empty image, and then decode over it. + oimage := v1.Image{} + if err := json.Unmarshal(i.oconfig, &oimage); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + } + // Always replace this value, since we're newer than our base image. + oimage.Created = &created + // Clear the list of diffIDs, since we always repopulate it. + oimage.RootFS.Type = docker.TypeLayers + oimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so that we can append + // entries to it. + if i.squash { + oimage.History = []v1.History{} + } + + // Build an empty image, and then decode over it. + dimage := docker.V2Image{} + if err := json.Unmarshal(i.dconfig, &dimage); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + } + dimage.Parent = docker.ID(i.parent) + dimage.Container = i.containerID + if dimage.Config != nil { + dimage.ContainerConfig = *dimage.Config + } + // Always replace this value, since we're newer than our base image. + dimage.Created = created + // Clear the list of diffIDs, since we always repopulate it. 
+ dimage.RootFS = &docker.V2S2RootFS{} + dimage.RootFS.Type = docker.TypeLayers + dimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so + // that we can append entries to it. Clear the parent, too, we no + // longer include its layers and history. + if i.squash { + dimage.Parent = "" + dimage.History = []docker.V2S2History{} + } + + // Build empty manifests. The Layers lists will be populated later. + omanifest := v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageManifest, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + }, + Layers: []v1.Descriptor{}, + Annotations: i.annotations, + } + + dmanifest := docker.V2S2Manifest{ + V2Versioned: docker.V2Versioned{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + }, + Config: docker.V2S2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + }, + Layers: []docker.V2S2Descriptor{}, + } + + return oimage, omanifest, dimage, dmanifest, nil +} + +func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { + // Decide which type of manifest and configuration output we're going to provide. + manifestType := i.preferredManifestType + // If it's not a format we support, return an error. + if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType { + return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", + manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType) + } + // Start building the list of layers using the read-write layer. + layers := []string{} + layerID := i.layerID + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "unable to read layer %q", layerID) + } + // Walk the list of parent layers, prepending each as we go. If we're squashing, + // stop at the layer ID of the top layer, which we won't really be using anyway. + for layer != nil { + layers = append(append([]string{}, layerID), layers...) + layerID = layer.Parent + if layerID == "" || i.squash { + err = nil + break + } + layer, err = i.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "unable to read layer %q", layerID) + } + } + logrus.Debugf("layer list: %q", layers) + + // Make a temporary directory to hold blobs. + path, err := ioutil.TempDir(os.TempDir(), define.Package) + if err != nil { + return nil, errors.Wrapf(err, "error creating temporary directory to hold layer blobs") + } + logrus.Debugf("using %q to hold temporary data", path) + defer func() { + if src == nil { + err2 := os.RemoveAll(path) + if err2 != nil { + logrus.Errorf("error removing layer blob directory: %v", err) + } + } + }() + + // Build fresh copies of the configurations and manifest so that we don't mess with any + // values in the Builder object itself. + oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests() + if err != nil { + return nil, err + } + + // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. + blobLayers := make(map[digest.Digest]blobLayerInfo) + for _, layerID := range layers { + what := fmt.Sprintf("layer %q", layerID) + if i.squash { + what = fmt.Sprintf("container %q", i.containerID) + } + // The default layer media type assumes no compression. 
+ omediaType := v1.MediaTypeImageLayer + dmediaType := docker.V2S2MediaTypeUncompressedLayer + // Look up this layer. + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) + } + // If we're up to the final layer, but we don't want to include + // a diff for it, we're done. + if i.emptyLayer && layerID == i.layerID { + continue + } + // If we already know the digest of the contents of parent + // layers, reuse their blobsums, diff IDs, and sizes. + if !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { + layerBlobSum := layer.UncompressedDigest + layerBlobSize := layer.UncompressedSize + diffID := layer.UncompressedDigest + // Note this layer in the manifest, using the appropriate blobsum. + olayerDescriptor := v1.Descriptor{ + MediaType: omediaType, + Digest: layerBlobSum, + Size: layerBlobSize, + } + omanifest.Layers = append(omanifest.Layers, olayerDescriptor) + dlayerDescriptor := docker.V2S2Descriptor{ + MediaType: dmediaType, + Digest: layerBlobSum, + Size: layerBlobSize, + } + dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) + // Note this layer in the list of diffIDs, again using the uncompressed digest. + oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID) + dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID) + blobLayers[diffID] = blobLayerInfo{ + ID: layer.ID, + Size: layerBlobSize, + } + continue + } + // Figure out if we need to change the media type, in case we've changed the compression. + omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression) + if err != nil { + return nil, err + } + // Start reading either the layer or the whole container rootfs. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + var rc io.ReadCloser + var errChan chan error + if i.squash { + // Extract the root filesystem as a single layer. + rc, errChan, err = i.extractRootfs() + if err != nil { + return nil, err + } + } else { + // Extract this layer, one of possibly many. + rc, err = i.store.Diff("", layerID, diffOptions) + if err != nil { + return nil, errors.Wrapf(err, "error extracting %s", what) + } + } + srcHasher := digest.Canonical.Digester() + // Set up to write the possibly-recompressed blob. + layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + rc.Close() + return nil, errors.Wrapf(err, "error opening file for %s", what) + } + + counter := ioutils.NewWriteCounter(layerFile) + var destHasher digest.Digester + var multiWriter io.Writer + // Avoid rehashing when we do not compress. + if i.compression != archive.Uncompressed { + destHasher = digest.Canonical.Digester() + multiWriter = io.MultiWriter(counter, destHasher.Hash()) + } else { + destHasher = srcHasher + multiWriter = counter + } + // Compress the layer, if we're recompressing it. + writeCloser, err := archive.CompressStream(multiWriter, i.compression) + if err != nil { + layerFile.Close() + rc.Close() + return nil, errors.Wrapf(err, "error compressing %s", what) + } + writer := io.MultiWriter(writeCloser, srcHasher.Hash()) + // Use specified timestamps in the layer, if we're doing that for + // history entries. 
+ if i.created != nil { + nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close) + writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) { + // Changing a zeroed field to a non-zero field + // can affect the format that the library uses + // for writing the header, so only change + // fields that are already set to avoid + // changing the format (and as a result, + // changing the length) of the header that we + // write. + if !hdr.ModTime.IsZero() { + hdr.ModTime = *i.created + } + if !hdr.AccessTime.IsZero() { + hdr.AccessTime = *i.created + } + if !hdr.ChangeTime.IsZero() { + hdr.ChangeTime = *i.created + } + return false, false, nil + }) + writer = io.Writer(writeCloser) + } + size, err := io.Copy(writer, rc) + writeCloser.Close() + layerFile.Close() + rc.Close() + + if errChan != nil { + err = <-errChan + if err != nil { + return nil, err + } + } + + if err != nil { + return nil, errors.Wrapf(err, "error storing %s to file", what) + } + if i.compression == archive.Uncompressed { + if size != counter.Count { + return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) + } + } else { + size = counter.Count + } + logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String()) + // Rename the layer so that we can more easily find it by digest later. + finalBlobName := filepath.Join(path, destHasher.Digest().String()) + if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil { + return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName) + } + // Add a note in the manifest about the layer. The blobs are identified by their possibly- + // compressed blob digests. + olayerDescriptor := v1.Descriptor{ + MediaType: omediaType, + Digest: destHasher.Digest(), + Size: size, + } + omanifest.Layers = append(omanifest.Layers, olayerDescriptor) + dlayerDescriptor := docker.V2S2Descriptor{ + MediaType: dmediaType, + Digest: destHasher.Digest(), + Size: size, + } + dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) + // Add a note about the diffID, which is always the layer's uncompressed digest. + oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) + dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) + } + + // Build history notes in the image configurations. 
+ appendHistory := func(history []v1.History) { + for i := range history { + var created *time.Time + if history[i].Created != nil { + copiedTimestamp := *history[i].Created + created = &copiedTimestamp + } + onews := v1.History{ + Created: created, + CreatedBy: history[i].CreatedBy, + Author: history[i].Author, + Comment: history[i].Comment, + EmptyLayer: true, + } + oimage.History = append(oimage.History, onews) + if created == nil { + created = &time.Time{} + } + dnews := docker.V2S2History{ + Created: *created, + CreatedBy: history[i].CreatedBy, + Author: history[i].Author, + Comment: history[i].Comment, + EmptyLayer: true, + } + dimage.History = append(dimage.History, dnews) + } + } + appendHistory(i.preEmptyLayers) + created := time.Now().UTC() + if i.created != nil { + created = (*i.created).UTC() + } + comment := i.historyComment + // Add a comment for which base image is being used + if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID { + comment += "FROM " + i.fromImageName + } + onews := v1.History{ + Created: &created, + CreatedBy: i.createdBy, + Author: oimage.Author, + Comment: comment, + EmptyLayer: i.emptyLayer, + } + oimage.History = append(oimage.History, onews) + dnews := docker.V2S2History{ + Created: created, + CreatedBy: i.createdBy, + Author: dimage.Author, + Comment: comment, + EmptyLayer: i.emptyLayer, + } + dimage.History = append(dimage.History, dnews) + appendHistory(i.postEmptyLayers) + + // Sanity check that we didn't just create a mismatch between non-empty layers in the + // history and the number of diffIDs. + expectedDiffIDs := expectedOCIDiffIDs(oimage) + if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { + return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs)) + } + expectedDiffIDs = expectedDockerDiffIDs(dimage) + if len(dimage.RootFS.DiffIDs) != expectedDiffIDs { + return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs)) + } + + // Encode the image configuration blob. + oconfig, err := json.Marshal(&oimage) + if err != nil { + return nil, errors.Wrapf(err, "error encoding %#v as json", oimage) + } + logrus.Debugf("OCIv1 config = %s", oconfig) + + // Add the configuration blob to the manifest. + omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) + omanifest.Config.Size = int64(len(oconfig)) + omanifest.Config.MediaType = v1.MediaTypeImageConfig + + // Encode the manifest. + omanifestbytes, err := json.Marshal(&omanifest) + if err != nil { + return nil, errors.Wrapf(err, "error encoding %#v as json", omanifest) + } + logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) + + // Encode the image configuration blob. + dconfig, err := json.Marshal(&dimage) + if err != nil { + return nil, errors.Wrapf(err, "error encoding %#v as json", dimage) + } + logrus.Debugf("Docker v2s2 config = %s", dconfig) + + // Add the configuration blob to the manifest. + dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) + dmanifest.Config.Size = int64(len(dconfig)) + dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType + + // Encode the manifest. 
+ dmanifestbytes, err := json.Marshal(&dmanifest) + if err != nil { + return nil, errors.Wrapf(err, "error encoding %#v as json", dmanifest) + } + logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) + + // Decide which manifest and configuration blobs we'll actually output. + var config []byte + var imageManifest []byte + switch manifestType { + case v1.MediaTypeImageManifest: + imageManifest = omanifestbytes + config = oconfig + case manifest.DockerV2Schema2MediaType: + imageManifest = dmanifestbytes + config = dconfig + default: + panic("unreachable code: unsupported manifest type") + } + src = &containerImageSource{ + path: path, + ref: i, + store: i.store, + containerID: i.containerID, + mountLabel: i.mountLabel, + layerID: i.layerID, + names: i.names, + compression: i.compression, + config: config, + configDigest: digest.Canonical.FromBytes(config), + manifest: imageManifest, + manifestType: manifestType, + blobDirectory: i.blobDirectory, + blobLayers: blobLayers, + } + return src, nil +} + +func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.Errorf("can't write to a container") +} + +func (i *containerImageRef) DockerReference() reference.Named { + return i.name +} + +func (i *containerImageRef) StringWithinTransport() string { + if len(i.names) > 0 { + return i.names[0] + } + return "" +} + +func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error { + // we were never here + return nil +} + +func (i *containerImageRef) PolicyConfigurationIdentity() string { + return "" +} + +func (i *containerImageRef) PolicyConfigurationNamespaces() []string { + return nil +} + +func (i *containerImageRef) Transport() types.ImageTransport { + return is.Transport +} + +func (i *containerImageSource) Close() error { + err := os.RemoveAll(i.path) + if err != nil { + return errors.Wrapf(err, "error removing layer blob directory") + } + return nil +} + +func (i *containerImageSource) Reference() types.ImageReference { + return i.ref +} + +func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return nil, nil +} + +func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return i.manifest, i.manifestType, nil +} + +func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +func (i *containerImageSource) HasThreadSafeGetBlob() bool { + return false +} + +func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) { + if blob.Digest == i.configDigest { + logrus.Debugf("start reading config") + reader := bytes.NewReader(i.config) + closer := func() error { + logrus.Debugf("finished reading config") + return nil + } + return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil + } + var layerReadCloser io.ReadCloser + size = -1 + if blobLayerInfo, ok := i.blobLayers[blob.Digest]; ok { + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + layerReadCloser, err = i.store.Diff("", blobLayerInfo.ID, diffOptions) + size = blobLayerInfo.Size + } else { + for _, blobDir := range []string{i.blobDirectory, i.path} { + var layerFile *os.File + layerFile, err = os.OpenFile(filepath.Join(blobDir, 
blob.Digest.String()), os.O_RDONLY, 0600) + if err == nil { + st, err := layerFile.Stat() + if err != nil { + logrus.Warnf("error reading size of layer file %q: %v", blob.Digest.String(), err) + } else { + size = st.Size() + layerReadCloser = layerFile + break + } + layerFile.Close() + } + if !os.IsNotExist(err) { + logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err) + } + } + } + if err != nil || layerReadCloser == nil || size == -1 { + logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) + return nil, -1, errors.Wrap(err, "error opening layer blob") + } + logrus.Debugf("reading layer %q", blob.Digest.String()) + closer := func() error { + logrus.Debugf("finished reading layer %q", blob.Digest.String()) + if err := layerReadCloser.Close(); err != nil { + return errors.Wrapf(err, "error closing layer %q after reading", blob.Digest.String()) + } + return nil + } + return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil +} + +func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, error) { + var name reference.Named + container, err := b.store.Container(b.ContainerID) + if err != nil { + return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID) + } + if len(container.Names) > 0 { + if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil { + name = parsed + } + } + manifestType := options.PreferredManifestType + if manifestType == "" { + manifestType = define.OCIv1ImageManifest + } + + for _, u := range options.UnsetEnvs { + b.UnsetEnv(u) + } + oconfig, err := json.Marshal(&b.OCIv1) + if err != nil { + return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1) + } + dconfig, err := json.Marshal(&b.Docker) + if err != nil { + return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker) + } + var created *time.Time + if options.HistoryTimestamp != nil { + historyTimestampUTC := options.HistoryTimestamp.UTC() + created = &historyTimestampUTC + } + createdBy := b.CreatedBy() + if createdBy == "" { + createdBy = strings.Join(b.Shell(), " ") + if createdBy == "" { + createdBy = "/bin/sh" + } + } + + parent := "" + if b.FromImageID != "" { + parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID) + if parentDigest.Validate() == nil { + parent = parentDigest.String() + } + } + + ref := &containerImageRef{ + fromImageName: b.FromImage, + fromImageID: b.FromImageID, + store: b.store, + compression: options.Compression, + name: name, + names: container.Names, + containerID: container.ID, + mountLabel: b.MountLabel, + layerID: container.LayerID, + oconfig: oconfig, + dconfig: dconfig, + created: created, + createdBy: createdBy, + historyComment: b.HistoryComment(), + annotations: b.Annotations(), + preferredManifestType: manifestType, + squash: options.Squash, + emptyLayer: options.EmptyLayer && !options.Squash, + idMappingOptions: &b.IDMappingOptions, + parent: parent, + blobDirectory: options.BlobDirectory, + preEmptyLayers: b.PrependedEmptyLayers, + postEmptyLayers: b.AppendedEmptyLayers, + } + return ref, nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go new file mode 100644 index 00000000000..77d8b6d5473 --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -0,0 +1,651 @@ +package imagebuildah + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + 
"net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + + "github.com/containerd/containerd/platforms" + "github.com/containers/buildah/define" + "github.com/containers/buildah/util" + "github.com/containers/common/libimage" + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/shortnames" + istorage "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/hashicorp/go-multierror" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/openshift/imagebuilder" + "github.com/openshift/imagebuilder/dockerfile/parser" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" +) + +const ( + PullIfMissing = define.PullIfMissing + PullAlways = define.PullAlways + PullIfNewer = define.PullIfNewer + PullNever = define.PullNever + + Gzip = archive.Gzip + Bzip2 = archive.Bzip2 + Xz = archive.Xz + Zstd = archive.Zstd + Uncompressed = archive.Uncompressed +) + +// Mount is a mountpoint for the build container. +type Mount = specs.Mount + +type BuildOptions = define.BuildOptions + +// BuildDockerfiles parses a set of one or more Dockerfiles (which may be +// URLs), creates one or more new Executors, and then runs +// Prepare/Execute/Commit/Delete over the entire set of instructions. +// If the Manifest option is set, returns the ID of the manifest list, else it +// returns the ID of the built image, and if a name was assigned to it, a +// canonical reference for that image. +func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (id string, ref reference.Canonical, err error) { + if options.CommonBuildOpts == nil { + options.CommonBuildOpts = &define.CommonBuildOptions{} + } + + if len(paths) == 0 { + return "", nil, errors.Errorf("error building: no dockerfiles specified") + } + if len(options.Platforms) > 1 && options.IIDFile != "" { + return "", nil, errors.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile) + } + + logger := logrus.New() + if options.Err != nil { + logger.SetOutput(options.Err) + } else { + logger.SetOutput(os.Stderr) + } + logger.SetLevel(logrus.GetLevel()) + + var dockerfiles []io.ReadCloser + defer func(dockerfiles ...io.ReadCloser) { + for _, d := range dockerfiles { + d.Close() + } + }(dockerfiles...) + + for _, tag := range append([]string{options.Output}, options.AdditionalTags...) { + if tag == "" { + continue + } + if _, err := util.VerifyTagName(tag); err != nil { + return "", nil, errors.Wrapf(err, "tag %s", tag) + } + } + + for _, dfile := range paths { + var data io.ReadCloser + + if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") { + logger.Debugf("reading remote Dockerfile %q", dfile) + resp, err := http.Get(dfile) + if err != nil { + return "", nil, err + } + if resp.ContentLength == 0 { + resp.Body.Close() + return "", nil, errors.Errorf("no contents in %q", dfile) + } + data = resp.Body + } else { + dinfo, err := os.Stat(dfile) + if err != nil { + // If the Dockerfile isn't available, try again with + // context directory prepended (if not prepended yet). 
+ if !strings.HasPrefix(dfile, options.ContextDirectory) {
+ dfile = filepath.Join(options.ContextDirectory, dfile)
+ dinfo, err = os.Stat(dfile)
+ }
+ }
+ if err != nil {
+ return "", nil, err
+ }
+
+ var contents *os.File
+ // If given a directory, look for a Containerfile or a Dockerfile inside it.
+ if dinfo.Mode().IsDir() {
+ for _, file := range []string{"Containerfile", "Dockerfile"} {
+ f := filepath.Join(dfile, file)
+ logger.Debugf("reading local %q", f)
+ contents, err = os.Open(f)
+ if err == nil {
+ break
+ }
+ }
+ } else {
+ contents, err = os.Open(dfile)
+ }
+
+ if err != nil {
+ return "", nil, err
+ }
+ dinfo, err = contents.Stat()
+ if err != nil {
+ contents.Close()
+ return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ }
+ if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
+ contents.Close()
+ return "", nil, errors.Errorf("no contents in %q", dfile)
+ }
+ data = contents
+ }
+
+ // pre-process Dockerfiles with ".in" suffix
+ if strings.HasSuffix(dfile, ".in") {
+ pData, err := preprocessContainerfileContents(logger, dfile, data, options.ContextDirectory)
+ if err != nil {
+ return "", nil, err
+ }
+ data = ioutil.NopCloser(pData)
+ }
+
+ dockerfiles = append(dockerfiles, data)
+ }
+
+ var files [][]byte
+ for _, dockerfile := range dockerfiles {
+ var b bytes.Buffer
+ if _, err := b.ReadFrom(dockerfile); err != nil {
+ return "", nil, err
+ }
+ files = append(files, b.Bytes())
+ }
+
+ if options.JobSemaphore == nil {
+ if options.Jobs != nil {
+ if *options.Jobs < 0 {
+ return "", nil, errors.New("error building: invalid value for jobs; it must not be negative (0 means no limit)")
+ }
+ if *options.Jobs > 0 {
+ options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs))
+ }
+ } else {
+ options.JobSemaphore = semaphore.NewWeighted(1)
+ }
+ }
+
+ manifestList := options.Manifest
+ options.Manifest = ""
+ type instance struct {
+ v1.Platform
+ ID string
+ }
+ var instances []instance
+ var instancesLock sync.Mutex
+
+ var builds multierror.Group
+ if options.SystemContext == nil {
+ options.SystemContext = &types.SystemContext{}
+ }
+
+ if len(options.Platforms) == 0 {
+ options.Platforms = append(options.Platforms, struct{ OS, Arch, Variant string }{
+ OS: options.SystemContext.OSChoice,
+ Arch: options.SystemContext.ArchitectureChoice,
+ })
+ }
+
+ if options.AllPlatforms {
+ options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext)
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ systemContext := options.SystemContext
+ for _, platform := range options.Platforms {
+ platformContext := *systemContext
+ platformSpec := platforms.Normalize(v1.Platform{
+ OS: platform.OS,
+ Architecture: platform.Arch,
+ Variant: platform.Variant,
+ })
+ // platforms.Normalize converts an empty OS value to GOOS, so we have to
+ // check the original value here to avoid overwriting the default for no reason.
+ if platform.OS != "" {
+ platformContext.OSChoice = platformSpec.OS
+ }
+ if platform.Arch != "" {
+ platformContext.ArchitectureChoice = platformSpec.Architecture
+ platformContext.VariantChoice = platformSpec.Variant
+ }
+ platformOptions := options
+ platformOptions.SystemContext = &platformContext
+ platformOptions.OS = platformContext.OSChoice
+ platformOptions.Architecture = platformContext.ArchitectureChoice
+ logPrefix := ""
+ if len(options.Platforms) > 1 {
+ logPrefix = "[" + platforms.Format(platformSpec) + "] "
+ }
+ builds.Go(func() error {
+ thisID, thisRef, err := buildDockerfilesOnce(ctx, store, logger, logPrefix, platformOptions, paths, files)
+ if err != nil {
+ return err
+ }
+ id, ref = thisID, thisRef
+ instancesLock.Lock()
+ instances = append(instances, instance{
+ ID: thisID,
+ Platform: platformSpec,
+ })
+ instancesLock.Unlock()
+ return nil
+ })
+ }
+
+ if merr := builds.Wait(); merr != nil {
+ if merr.Len() == 1 {
+ return "", nil, merr.Errors[0]
+ }
+ return "", nil, merr.ErrorOrNil()
+ }
+
+ if manifestList != "" {
+ rt, err := libimage.RuntimeFromStore(store, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ // Create the manifest list ourselves, so that it's not in a
+ // partially-populated state at any point if we're creating it
+ // fresh.
+ list, err := rt.LookupManifestList(manifestList)
+ if err != nil && errors.Cause(err) == storage.ErrImageUnknown {
+ list, err = rt.CreateManifestList(manifestList)
+ }
+ if err != nil {
+ return "", nil, err
+ }
+ // Add each instance to the list in turn.
+ storeTransportName := istorage.Transport.Name()
+ for _, instance := range instances {
+ instanceDigest, err := list.Add(ctx, storeTransportName+":"+instance.ID, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ err = list.AnnotateInstance(instanceDigest, &libimage.ManifestListAnnotateOptions{
+ Architecture: instance.Architecture,
+ OS: instance.OS,
+ Variant: instance.Variant,
+ })
+ if err != nil {
+ return "", nil, err
+ }
+ }
+ id, ref = list.ID(), nil
+ // Put together a canonical reference.
+ storeRef, err := istorage.Transport.NewStoreReference(store, nil, list.ID())
+ if err != nil {
+ return "", nil, err
+ }
+ imgSource, err := storeRef.NewImageSource(ctx, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ defer imgSource.Close()
+ manifestBytes, _, err := imgSource.GetManifest(ctx, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return "", nil, err
+ }
+ img, err := store.Image(id)
+ if err != nil {
+ return "", nil, err
+ }
+ for _, name := range img.Names {
+ if named, err := reference.ParseNamed(name); err == nil {
+ if r, err := reference.WithDigest(reference.TrimNamed(named), manifestDigest); err == nil {
+ ref = r
+ break
+ }
+ }
+ }
+ }
+
+ return id, ref, nil
+}
+
+func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, dockerfiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) {
+ mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfiles[0])
+ }
+
+ warnOnUnsetBuildArgs(logger, mainNode, options.Args)
+
+ for i, d := range dockerfilecontents[1:] {
+ additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfiles[i+1])
+ }
+ mainNode.Children = append(mainNode.Children, additionalNode.Children...)
+ }
+
+ // If any labels were specified, append them to the main node as extra,
+ // synthesized instructions so that they become a regular layer.
+ // Reason: Docker applies label modifications as a last step, which can then
+ // be processed like any other step, and if no other modifications are made
+ // to the layers it is easier to reuse cached layers.
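+ // In miniature, with illustrative values, each --label key=value pair
+ // becomes one synthetic instruction appended to the parse tree:
+ //
+ //     labelLine := fmt.Sprintf("LABEL %q=%q\n", "maintainer", "sealer")
+ //     if node, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine)); err == nil {
+ //             mainNode.Children = append(mainNode.Children, node.Children...)
+ //     }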
+ if len(options.Labels) > 0 { + for _, labelSpec := range options.Labels { + label := strings.SplitN(labelSpec, "=", 2) + labelLine := "" + key := label[0] + value := "" + if len(label) > 1 { + value = label[1] + } + // check from only empty key since docker supports empty value + if key != "" { + labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value) + additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine)) + if err != nil { + return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps") + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + } + } + + exec, err := newExecutor(logger, logPrefix, store, options, mainNode) + if err != nil { + return "", nil, errors.Wrapf(err, "error creating build executor") + } + b := imagebuilder.NewBuilder(options.Args) + defaultContainerConfig, err := config.Default() + if err != nil { + return "", nil, errors.Wrapf(err, "failed to get container config") + } + b.Env = append(defaultContainerConfig.GetDefaultEnv(), b.Env...) + stages, err := imagebuilder.NewStages(mainNode, b) + if err != nil { + return "", nil, errors.Wrap(err, "error reading multiple stages") + } + if options.Target != "" { + stagesTargeted, ok := stages.ThroughTarget(options.Target) + if !ok { + return "", nil, errors.Errorf("The target %q was not found in the provided Dockerfile", options.Target) + } + stages = stagesTargeted + } + return exec.Build(ctx, stages) +} + +func warnOnUnsetBuildArgs(logger *logrus.Logger, node *parser.Node, args map[string]string) { + argFound := make(map[string]bool) + for _, child := range node.Children { + switch strings.ToUpper(child.Value) { + case "ARG": + argName := child.Next.Value + if strings.Contains(argName, "=") { + res := strings.Split(argName, "=") + if res[1] != "" { + argFound[res[0]] = true + } + } + argHasValue := true + if !strings.Contains(argName, "=") { + argHasValue = argFound[argName] + } + if _, ok := args[argName]; !argHasValue && !ok { + logger.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=", argName)) + } + default: + continue + } + } +} + +// preprocessContainerfileContents runs CPP(1) in preprocess-only mode on the input +// dockerfile content and will use ctxDir as the base include path. 
+func preprocessContainerfileContents(logger *logrus.Logger, containerfile string, r io.Reader, ctxDir string) (stdout io.Reader, err error) {
+ cppCommand := "cpp"
+ cppPath, err := exec.LookPath(cppCommand)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = errors.Errorf("error: %s support requires %s to be installed", containerfile, cppCommand)
+ }
+ return nil, err
+ }
+
+ stdoutBuffer := bytes.Buffer{}
+ stderrBuffer := bytes.Buffer{}
+
+ cmd := exec.Command(cppPath, "-E", "-iquote", ctxDir, "-traditional", "-undef", "-")
+ cmd.Stdin = r
+ cmd.Stdout = &stdoutBuffer
+ cmd.Stderr = &stderrBuffer
+
+ if err = cmd.Start(); err != nil {
+ return nil, errors.Wrapf(err, "preprocessing %s", containerfile)
+ }
+ if err = cmd.Wait(); err != nil {
+ if stderrBuffer.Len() != 0 {
+ logger.Warnf("Ignoring %s\n", stderrBuffer.String())
+ }
+ if stdoutBuffer.Len() == 0 {
+ return nil, errors.Wrapf(err, "error preprocessing %s: preprocessor produced no output", containerfile)
+ }
+ }
+ return &stdoutBuffer, nil
+}
+
+// platformsForBaseImages resolves the names of base images from the
+// dockerfiles, and if they are all valid references to manifest lists, returns
+// the list of platforms that are supported by all of the base images.
+func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
+ baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args)
+ if err != nil {
+ return nil, errors.Wrapf(err, "determining list of base images")
+ }
+ logrus.Debugf("unresolved base images: %v", baseImages)
+ if len(baseImages) == 0 {
+ return nil, errors.Errorf("build uses no non-scratch base images")
+ }
+ targetPlatforms := make(map[string]struct{})
+ var platformList []struct{ OS, Arch, Variant string }
+ for baseImageIndex, baseImage := range baseImages {
+ resolved, err := shortnames.Resolve(systemContext, baseImage)
+ if err != nil {
+ return nil, errors.Wrapf(err, "resolving image name %q", baseImage)
+ }
+ var manifestBytes []byte
+ var manifestType string
+ for _, candidate := range resolved.PullCandidates {
+ ref, err := docker.NewReference(candidate.Value)
+ if err != nil {
+ logrus.Debugf("parsing image reference %q: %v", candidate.Value.String(), err)
+ continue
+ }
+ src, err := ref.NewImageSource(ctx, systemContext)
+ if err != nil {
+ logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err)
+ continue
+ }
+ candidateBytes, candidateType, err := src.GetManifest(ctx, nil)
+ _ = src.Close()
+ if err != nil {
+ logrus.Debugf("reading image manifest for %q: %v", baseImage, err)
+ continue
+ }
+ if !manifest.MIMETypeIsMultiImage(candidateType) {
+ logrus.Debugf("base image %q is not a reference to a manifest list", baseImage)
+ continue
+ }
+ if err := candidate.Record(); err != nil {
+ logrus.Debugf("error recording name %q for base image %q: %v", candidate.Value.String(), baseImage, err)
+ continue
+ }
+ baseImage = candidate.Value.String()
+ manifestBytes, manifestType = candidateBytes, candidateType
+ break
+ }
+ if len(manifestBytes) == 0 {
+ if len(resolved.PullCandidates) > 0 {
+ return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
+ }
+ return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage)
+ }
+ if manifestType != v1.MediaTypeImageIndex {
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage)
+ }
+ list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex)
+ if err != nil {
+ return nil, errors.Wrapf(err, "converting manifest list from base image %q to an OCI index", baseImage)
+ }
+ manifestBytes, err = list.Serialize()
+ if err != nil {
+ return nil, errors.Wrapf(err, "encoding converted OCI index for base image %q", baseImage)
+ }
+ }
+ index, err := manifest.OCI1IndexFromManifest(manifestBytes)
+ if err != nil {
+ return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage)
+ }
+ if baseImageIndex == 0 {
+ // populate the list with the first image's normalized platforms
+ for _, instance := range index.Manifests {
+ if instance.Platform == nil {
+ continue
+ }
+ platform := platforms.Normalize(*instance.Platform)
+ targetPlatforms[platforms.Format(platform)] = struct{}{}
+ logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform))
+ }
+ } else {
+ // prune the list of any normalized platforms this base image doesn't support
+ imagePlatforms := make(map[string]struct{})
+ for _, instance := range index.Manifests {
+ if instance.Platform == nil {
+ continue
+ }
+ platform := platforms.Normalize(*instance.Platform)
+ imagePlatforms[platforms.Format(platform)] = struct{}{}
+ logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform))
+ }
+ var removed []string
+ for platform := range targetPlatforms {
+ if _, present := imagePlatforms[platform]; !present {
+ removed = append(removed, platform)
+ logger.Debugf("image %q does not support %q", baseImage, platform)
+ }
+ }
+ for _, remove := range removed {
+ delete(targetPlatforms, remove)
+ }
+ }
+ if baseImageIndex == len(baseImages)-1 && len(targetPlatforms) > 0 {
+ // extract the list
+ for platform := range targetPlatforms {
+ platform, err := platforms.Parse(platform)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform)
+ }
+ platformList = append(platformList, struct{ OS, Arch, Variant string }{
+ OS: platform.OS,
+ Arch: platform.Architecture,
+ Variant: platform.Variant,
+ })
+ logger.Debugf("base images all support %q", platform)
+ }
+ }
+ }
+ if len(platformList) == 0 {
+ return nil, errors.New("base images have no platforms in common")
+ }
+ return platformList, nil
+}
+
+// baseImages parses the dockerfilecontents, possibly replacing the base image
+// of the first FROM instruction with the value of from, and returns the list
+// of base images as written. Each entry in the dockerfilenames slice
+// corresponds to a slice in dockerfilecontents.
+func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) {
+ mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0])
+ }
+
+ for i, d := range dockerfilecontents[1:] {
+ additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i+1])
+ }
+ mainNode.Children = append(mainNode.Children, additionalNode.Children...)
+ } + + b := imagebuilder.NewBuilder(args) + defaultContainerConfig, err := config.Default() + if err != nil { + return nil, errors.Wrapf(err, "failed to get container config") + } + b.Env = defaultContainerConfig.GetDefaultEnv() + stages, err := imagebuilder.NewStages(mainNode, b) + if err != nil { + return nil, errors.Wrap(err, "error reading multiple stages") + } + var baseImages []string + nicknames := make(map[string]bool) + for stageIndex, stage := range stages { + node := stage.Node // first line + for node != nil { // each line + for _, child := range node.Children { // tokens on this line, though we only care about the first + switch strings.ToUpper(child.Value) { // first token - instruction + case "FROM": + if child.Next != nil { // second token on this line + // If we have a fromOverride, replace the value of + // image name for the first FROM in the Containerfile. + if from != "" { + child.Next.Value = from + from = "" + } + base := child.Next.Value + if base != "scratch" && !nicknames[base] { + // TODO: this didn't undergo variable and arg + // expansion, so if the AS clause in another + // FROM instruction uses argument values, + // we might not record the right value here. + baseImages = append(baseImages, base) + } + } + } + } + node = node.Next // next line + } + if stage.Name != strconv.Itoa(stageIndex) { + nicknames[stage.Name] = true + } + } + return baseImages, nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go new file mode 100644 index 00000000000..5183456d0d5 --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -0,0 +1,808 @@ +package imagebuildah + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/buildah" + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/buildah/util" + "github.com/containers/common/libimage" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + storageTransport "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/openshift/imagebuilder" + "github.com/openshift/imagebuilder/dockerfile/parser" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" +) + +// builtinAllowedBuildArgs is list of built-in allowed build args. Normally we +// complain if we're given values for arguments which have no corresponding ARG +// instruction in the Dockerfile, since that's usually an indication of a user +// error, but for these values we make exceptions and ignore them. 
+var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Executor is a buildah-based implementation of the imagebuilder.Executor +// interface. It coordinates the entire build by using one or more +// StageExecutors to handle each stage of the build. +type Executor struct { + containerSuffix string + logger *logrus.Logger + stages map[string]*StageExecutor + store storage.Store + contextDir string + pullPolicy define.PullPolicy + registry string + ignoreUnrecognizedInstructions bool + quiet bool + runtime string + runtimeArgs []string + transientMounts []Mount + compression archive.Compression + output string + outputFormat string + additionalTags []string + log func(format string, args ...interface{}) // can be nil + in io.Reader + out io.Writer + err io.Writer + signaturePolicyPath string + systemContext *types.SystemContext + reportWriter io.Writer + isolation define.Isolation + namespaceOptions []define.NamespaceOption + configureNetwork define.NetworkConfigurationPolicy + cniPluginPath string + cniConfigDir string + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + networkInterface nettypes.ContainerNetwork + idmappingOptions *define.IDMappingOptions + commonBuildOptions *define.CommonBuildOptions + defaultMountsFilePath string + iidfile string + squash bool + labels []string + annotations []string + layers bool + useCache bool + removeIntermediateCtrs bool + forceRmIntermediateCtrs bool + imageMap map[string]string // Used to map images that we create to handle the AS construct. + containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. + baseMap map[string]bool // Holds the names of every base image, as given. + rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. + blobDirectory string + excludes []string + ignoreFile string + unusedArgs map[string]struct{} + capabilities []string + devices define.ContainerDevices + signBy string + architecture string + timestamp *time.Time + os string + maxPullPushRetries int + retryPullPushDelay time.Duration + ociDecryptConfig *encconfig.DecryptConfig + lastError error + terminatedStage map[string]error + stagesLock sync.Mutex + stagesSemaphore *semaphore.Weighted + logRusage bool + rusageLogFile io.Writer + imageInfoLock sync.Mutex + imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs + fromOverride string + manifest string + secrets map[string]define.Secret + sshsources map[string]*sshagent.Source + logPrefix string + unsetEnvs []string + processLabel string // Shares processLabel of first stage container with containers of other stages in same build + mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build +} + +type imageTypeAndHistoryAndDiffIDs struct { + manifestType string + history []v1.History + diffIDs []digest.Digest + err error +} + +// newExecutor creates a new instance of the imagebuilder.Executor interface. 
+func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) { + defaultContainerConfig, err := config.Default() + if err != nil { + return nil, errors.Wrapf(err, "failed to get container config") + } + + excludes := options.Excludes + if len(excludes) == 0 { + excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile) + if err != nil { + return nil, err + } + } + capabilities, err := defaultContainerConfig.Capabilities("", options.AddCapabilities, options.DropCapabilities) + if err != nil { + return nil, err + } + + devices := define.ContainerDevices{} + for _, device := range append(defaultContainerConfig.Containers.Devices, options.Devices...) { + dev, err := parse.DeviceFromPath(device) + if err != nil { + return nil, err + } + devices = append(dev, devices...) + } + + transientMounts := []Mount{} + for _, volume := range append(defaultContainerConfig.Containers.Volumes, options.TransientMounts...) { + mount, err := parse.Volume(volume) + if err != nil { + return nil, err + } + transientMounts = append([]Mount{mount}, transientMounts...) + } + + secrets, err := parse.Secrets(options.CommonBuildOpts.Secrets) + if err != nil { + return nil, err + } + sshsources, err := parse.SSH(options.CommonBuildOpts.SSHSources) + if err != nil { + return nil, err + } + + writer := options.ReportWriter + if options.Quiet { + writer = ioutil.Discard + } + + var rusageLogFile io.Writer + + if options.LogRusage && !options.Quiet { + if options.RusageLogFile == "" { + rusageLogFile = options.Out + } else { + rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + } + } + + exec := Executor{ + containerSuffix: options.ContainerSuffix, + logger: logger, + stages: make(map[string]*StageExecutor), + store: store, + contextDir: options.ContextDirectory, + excludes: excludes, + ignoreFile: options.IgnoreFile, + pullPolicy: options.PullPolicy, + registry: options.Registry, + ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, + quiet: options.Quiet, + runtime: options.Runtime, + runtimeArgs: options.RuntimeArgs, + transientMounts: transientMounts, + compression: options.Compression, + output: options.Output, + outputFormat: options.OutputFormat, + additionalTags: options.AdditionalTags, + signaturePolicyPath: options.SignaturePolicyPath, + systemContext: options.SystemContext, + log: options.Log, + in: options.In, + out: options.Out, + err: options.Err, + reportWriter: writer, + isolation: options.Isolation, + namespaceOptions: options.NamespaceOptions, + configureNetwork: options.ConfigureNetwork, + cniPluginPath: options.CNIPluginPath, + cniConfigDir: options.CNIConfigDir, + networkInterface: options.NetworkInterface, + idmappingOptions: options.IDMappingOptions, + commonBuildOptions: options.CommonBuildOpts, + defaultMountsFilePath: options.DefaultMountsFilePath, + iidfile: options.IIDFile, + squash: options.Squash, + labels: append([]string{}, options.Labels...), + annotations: append([]string{}, options.Annotations...), + layers: options.Layers, + useCache: !options.NoCache, + removeIntermediateCtrs: options.RemoveIntermediateCtrs, + forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, + imageMap: make(map[string]string), + containerMap: make(map[string]*buildah.Builder), + baseMap: make(map[string]bool), + rootfsMap: make(map[string]bool), + blobDirectory: 
options.BlobDirectory, + unusedArgs: make(map[string]struct{}), + capabilities: capabilities, + devices: devices, + signBy: options.SignBy, + architecture: options.Architecture, + timestamp: options.Timestamp, + os: options.OS, + maxPullPushRetries: options.MaxPullPushRetries, + retryPullPushDelay: options.PullPushRetryDelay, + ociDecryptConfig: options.OciDecryptConfig, + terminatedStage: make(map[string]error), + stagesSemaphore: options.JobSemaphore, + logRusage: options.LogRusage, + rusageLogFile: rusageLogFile, + imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs), + fromOverride: options.From, + manifest: options.Manifest, + secrets: secrets, + sshsources: sshsources, + logPrefix: logPrefix, + unsetEnvs: options.UnsetEnvs, + } + if exec.err == nil { + exec.err = os.Stderr + } + if exec.out == nil { + exec.out = os.Stdout + } + + for arg := range options.Args { + if _, isBuiltIn := builtinAllowedBuildArgs[arg]; !isBuiltIn { + exec.unusedArgs[arg] = struct{}{} + } + } + for _, line := range mainNode.Children { + node := line + for node != nil { // tokens on this line, though we only care about the first + switch strings.ToUpper(node.Value) { // first token - instruction + case "ARG": + arg := node.Next + if arg != nil { + // We have to be careful here - it's either an argument + // and value, or just an argument, since they can be + // separated by either "=" or whitespace. + list := strings.SplitN(arg.Value, "=", 2) + delete(exec.unusedArgs, list[0]) + } + } + break + } + } + return &exec, nil +} + +// startStage creates a new stage executor that will be referenced whenever a +// COPY or ADD statement uses a --from=NAME flag. +func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor { + stageExec := &StageExecutor{ + ctx: ctx, + executor: b, + log: b.log, + index: stage.Position, + stages: stages, + name: stage.Name, + volumeCache: make(map[string]string), + volumeCacheInfo: make(map[string]os.FileInfo), + output: output, + stage: stage, + } + b.stages[stage.Name] = stageExec + if idx := strconv.Itoa(stage.Position); idx != stage.Name { + b.stages[idx] = stageExec + } + return stageExec +} + +// resolveNameToImageRef creates a types.ImageReference for the output name in local storage +func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) { + if imageRef, err := alltransports.ParseImageName(output); err == nil { + return imageRef, nil + } + runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext}) + if err != nil { + return nil, err + } + resolved, err := runtime.ResolveName(output) + if err != nil { + return nil, err + } + imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved) + if err == nil { + return imageRef, nil + } + + return imageRef, err +} + +// waitForStage waits for an entry to be added to terminatedStage indicating +// that the specified stage has finished. If there is no stage defined by that +// name, then it will return (false, nil). If there is a stage defined by that +// name, it will return true along with any error it encounters. 
+func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) { + found := false + for _, otherStage := range stages { + if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name { + found = true + break + } + } + if !found { + return false, nil + } + for { + if b.lastError != nil { + return true, b.lastError + } + + b.stagesLock.Lock() + terminationError, terminated := b.terminatedStage[name] + b.stagesLock.Unlock() + + if terminationError != nil { + return false, terminationError + } + if terminated { + return true, nil + } + + b.stagesSemaphore.Release(1) + time.Sleep(time.Millisecond * 10) + if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + return true, errors.Wrapf(err, "error reacquiring job semaphore") + } + } +} + +// getImageTypeAndHistoryAndDiffIDs returns the manifest type, history, and diff IDs list of imageID. +func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, []v1.History, []digest.Digest, error) { + b.imageInfoLock.Lock() + imageInfo, ok := b.imageInfoCache[imageID] + b.imageInfoLock.Unlock() + if ok { + return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err + } + imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID) + if err != nil { + return "", nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID) + } + ref, err := imageRef.NewImage(ctx, nil) + if err != nil { + return "", nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID) + } + defer ref.Close() + oci, err := ref.OCIConfig(ctx) + if err != nil { + return "", nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID) + } + manifestBytes, manifestFormat, err := ref.Manifest(ctx) + if err != nil { + return "", nil, nil, errors.Wrapf(err, "error getting manifest of image %q", imageID) + } + if manifestFormat == "" && len(manifestBytes) > 0 { + manifestFormat = manifest.GuessMIMEType(manifestBytes) + } + b.imageInfoLock.Lock() + b.imageInfoCache[imageID] = imageTypeAndHistoryAndDiffIDs{ + manifestType: manifestFormat, + history: oci.History, + diffIDs: oci.RootFS.DiffIDs, + err: nil, + } + b.imageInfoLock.Unlock() + return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil +} + +func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) { + stage := stages[stageIndex] + ib := stage.Builder + node := stage.Node + base, err := ib.From(node) + + // If this is the last stage, then the image that we produce at + // its end should be given the desired output name. 
+ output := "" + if stageIndex == len(stages)-1 { + output = b.output + } + + if err != nil { + logrus.Debugf("buildStage(node.Children=%#v)", node.Children) + return "", nil, err + } + + b.stagesLock.Lock() + stageExecutor := b.startStage(ctx, &stage, stages, output) + if stageExecutor.log == nil { + stepCounter := 0 + stageExecutor.log = func(format string, args ...interface{}) { + prefix := b.logPrefix + if len(stages) > 1 { + prefix += fmt.Sprintf("[%d/%d] ", stageIndex+1, len(stages)) + } + if !strings.HasPrefix(format, "COMMIT") { + stepCounter++ + prefix += fmt.Sprintf("STEP %d", stepCounter) + if stepCounter <= len(stage.Node.Children)+1 { + prefix += fmt.Sprintf("/%d", len(stage.Node.Children)+1) + } + prefix += ": " + } + suffix := "\n" + fmt.Fprintf(stageExecutor.executor.out, prefix+format+suffix, args...) + } + } + b.stagesLock.Unlock() + + // If this a single-layer build, or if it's a multi-layered + // build and b.forceRmIntermediateCtrs is set, make sure we + // remove the intermediate/build containers, regardless of + // whether or not the stage's build fails. + // Skip cleanup if the stage has no instructions. + if b.forceRmIntermediateCtrs || !b.layers && len(stage.Node.Children) > 0 { + b.stagesLock.Lock() + cleanupStages[stage.Position] = stageExecutor + b.stagesLock.Unlock() + } + + // Build this stage. + if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil { + return "", nil, err + } + + // The stage succeeded, so remove its build container if we're + // told to delete successful intermediate/build containers for + // multi-layered builds. + // Skip cleanup if the stage has no instructions. + if b.removeIntermediateCtrs && len(stage.Node.Children) > 0 { + b.stagesLock.Lock() + cleanupStages[stage.Position] = stageExecutor + b.stagesLock.Unlock() + } + + return imageID, ref, nil +} + +// Build takes care of the details of running Prepare/Execute/Commit/Delete +// over each of the one or more parsed Dockerfiles and stages. +func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) { + if len(stages) == 0 { + return "", nil, errors.New("error building: no stages to build") + } + var cleanupImages []string + cleanupStages := make(map[int]*StageExecutor) + + stdout := b.out + if b.quiet { + b.out = ioutil.Discard + } + + cleanup := func() error { + var lastErr error + // Clean up any containers associated with the final container + // built by a stage, for stages that succeeded, since we no + // longer need their filesystem contents. + + b.stagesLock.Lock() + for _, stage := range cleanupStages { + if err := stage.Delete(); err != nil { + logrus.Debugf("Failed to cleanup stage containers: %v", err) + lastErr = err + } + } + cleanupStages = nil + b.stagesLock.Unlock() + + // Clean up any builders that we used to get data from images. + for _, builder := range b.containerMap { + if err := builder.Delete(); err != nil { + logrus.Debugf("Failed to cleanup image containers: %v", err) + lastErr = err + } + } + b.containerMap = nil + // Clean up any intermediate containers associated with stages, + // since we're not keeping them for debugging. + if b.removeIntermediateCtrs { + if err := b.deleteSuccessfulIntermediateCtrs(); err != nil { + logrus.Debugf("Failed to cleanup intermediate containers: %v", err) + lastErr = err + } + } + // Remove images from stages except the last one, since we're + // not going to use them as a starting point for any new + // stages. 
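+ // Equivalently, and assuming later intermediate images may be built on top
+ // of earlier ones (so children must be removed before their ancestors), the
+ // loop below is a newest-first sweep:
+ //
+ //     for i := len(cleanupImages) - 1; i >= 0; i-- {
+ //             if cleanupImages[i] != imageID { // keep the final image
+ //                     _, _ = b.store.DeleteImage(cleanupImages[i], true)
+ //             }
+ //     }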
+ for i := range cleanupImages { + removeID := cleanupImages[len(cleanupImages)-i-1] + if removeID == imageID { + continue + } + if _, err := b.store.DeleteImage(removeID, true); err != nil { + logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err) + if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer { + lastErr = err + } + } + } + cleanupImages = nil + + if b.rusageLogFile != nil && b.rusageLogFile != b.out { + // we deliberately ignore the error here, as this + // function can be called multiple times + if closer, ok := b.rusageLogFile.(interface{ Close() error }); ok { + closer.Close() + } + } + return lastErr + } + + defer func() { + if cleanupErr := cleanup(); cleanupErr != nil { + if err == nil { + err = cleanupErr + } else { + err = errors.Wrap(err, cleanupErr.Error()) + } + } + }() + + // Build maps of every named base image and every referenced stage root + // filesystem. Individual stages can use them to determine whether or + // not they can skip certain steps near the end of their stages. + for stageIndex, stage := range stages { + node := stage.Node // first line + for node != nil { // each line + for _, child := range node.Children { // tokens on this line, though we only care about the first + switch strings.ToUpper(child.Value) { // first token - instruction + case "FROM": + if child.Next != nil { // second token on this line + // If we have a fromOverride, replace the value of + // image name for the first FROM in the Containerfile. + if b.fromOverride != "" { + child.Next.Value = b.fromOverride + b.fromOverride = "" + } + base := child.Next.Value + if base != "scratch" { + // TODO: this didn't undergo variable and arg + // expansion, so if the AS clause in another + // FROM instruction uses argument values, + // we might not record the right value here. + b.baseMap[base] = true + logrus.Debugf("base for stage %d: %q", stageIndex, base) + } + } + case "ADD", "COPY": + for _, flag := range child.Flags { // flags for this instruction + if strings.HasPrefix(flag, "--from=") { + // TODO: this didn't undergo variable and + // arg expansion, so if the previous stage + // was named using argument values, we might + // not record the right value here. + rootfs := strings.TrimPrefix(flag, "--from=") + b.rootfsMap[rootfs] = true + logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs) + } + } + } + } + node = node.Next // next line + } + } + + type Result struct { + Index int + ImageID string + Ref reference.Canonical + Error error + } + + ch := make(chan Result, len(stages)) + + if b.stagesSemaphore == nil { + b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages))) + } + + var wg sync.WaitGroup + wg.Add(len(stages)) + + go func() { + cancel := false + for stageIndex := range stages { + index := stageIndex + // Acquire the semaphore before creating the goroutine so we are sure they + // run in the specified order. 
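+ // The acquire-before-go pattern, in miniature (the job count is illustrative):
+ //
+ //     sem := semaphore.NewWeighted(2) // e.g. --jobs=2
+ //     for i := range stages {
+ //             if err := sem.Acquire(ctx, 1); err != nil {
+ //                     break // context canceled: stop launching stages
+ //             }
+ //             go func(i int) {
+ //                     defer sem.Release(1)
+ //                     // ... build stage i ...
+ //             }(i)
+ //     }
+ //
+ // Acquiring in the loop body rather than inside the goroutine guarantees the
+ // stage goroutines are started in Dockerfile order, even when several run
+ // concurrently.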
+ if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + cancel = true + b.lastError = err + ch <- Result{ + Index: index, + Error: err, + } + wg.Done() + continue + } + b.stagesLock.Lock() + cleanupStages := cleanupStages + b.stagesLock.Unlock() + go func() { + defer b.stagesSemaphore.Release(1) + defer wg.Done() + if cancel || cleanupStages == nil { + var err error + if stages[index].Name != strconv.Itoa(index) { + err = errors.Errorf("not building stage %d: build canceled", index) + } else { + err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name) + } + ch <- Result{ + Index: index, + Error: err, + } + return + } + stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index) + if stageErr != nil { + cancel = true + ch <- Result{ + Index: index, + Error: stageErr, + } + return + } + + ch <- Result{ + Index: index, + ImageID: stageID, + Ref: stageRef, + Error: nil, + } + }() + } + }() + go func() { + wg.Wait() + close(ch) + }() + + for r := range ch { + stage := stages[r.Index] + + b.stagesLock.Lock() + b.terminatedStage[stage.Name] = r.Error + b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error + + if r.Error != nil { + b.stagesLock.Unlock() + b.lastError = r.Error + return "", nil, r.Error + } + + // If this is an intermediate stage, make a note of the ID, so + // that we can look it up later. + if r.Index < len(stages)-1 && r.ImageID != "" { + b.imageMap[stage.Name] = r.ImageID + // We're not populating the cache with intermediate + // images, so add this one to the list of images that + // we'll remove later. + if !b.layers { + cleanupImages = append(cleanupImages, r.ImageID) + } + } + if r.Index == len(stages)-1 { + imageID = r.ImageID + ref = r.Ref + } + b.stagesLock.Unlock() + } + + if len(b.unusedArgs) > 0 { + unusedList := make([]string, 0, len(b.unusedArgs)) + for k := range b.unusedArgs { + unusedList = append(unusedList, k) + } + sort.Strings(unusedList) + fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList) + } + + // Add additional tags and print image names recorded in storage + if dest, err := b.resolveNameToImageRef(b.output); err == nil { + switch dest.Transport().Name() { + case is.Transport.Name(): + img, err := is.Transport.GetStoreImage(b.store, dest) + if err != nil { + return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + } + if len(b.additionalTags) > 0 { + if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil { + return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...)) + } + logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) + } + // Report back the caller the tags applied, if any. 
+ img, err = is.Transport.GetStoreImage(b.store, dest) + if err != nil { + return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + } + for _, name := range img.Names { + fmt.Fprintf(b.out, "Successfully tagged %s\n", name) + } + + default: + if len(b.additionalTags) > 0 { + b.logger.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name()) + } + } + } + + if err := cleanup(); err != nil { + return "", nil, err + } + logrus.Debugf("printing final image id %q", imageID) + if b.iidfile != "" { + if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil { + return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile) + } + } else { + if _, err := stdout.Write([]byte(imageID + "\n")); err != nil { + return imageID, ref, errors.Wrapf(err, "failed to write image ID to stdout") + } + } + return imageID, ref, nil +} + +// deleteSuccessfulIntermediateCtrs goes through the container IDs in each +// stage's containerIDs list and deletes the containers associated with those +// IDs. +func (b *Executor) deleteSuccessfulIntermediateCtrs() error { + var lastErr error + for _, s := range b.stages { + for _, ctr := range s.containerIDs { + if err := b.store.DeleteContainer(ctr); err != nil { + b.logger.Errorf("error deleting build container %q: %v\n", ctr, err) + lastErr = err + } + } + // The stages map includes some stages under multiple keys, so + // clearing their lists after we process a given stage is + // necessary to avoid triggering errors that would occur if we + // tried to delete a given stage's containers multiple times. + s.containerIDs = nil + } + return lastErr +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go new file mode 100644 index 00000000000..81a65dea4cb --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -0,0 +1,1572 @@ +package imagebuildah + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/containers/buildah" + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + buildahdocker "github.com/containers/buildah/docker" + "github.com/containers/buildah/internal" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/buildah/pkg/rusage" + "github.com/containers/buildah/util" + config "github.com/containers/common/pkg/config" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/chrootarchive" + docker "github.com/fsouza/go-dockerclient" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/openshift/imagebuilder" + "github.com/openshift/imagebuilder/dockerfile/parser" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// StageExecutor bundles up what we need to know when executing one stage of a +// (possibly multi-stage) build. 
+// Each stage may need to produce an image to be used as the base in a later
+// stage (with the last stage's image being the end product of the build), and
+// it may need to leave its working container in place so that the container's
+// root filesystem's contents can be used as the source for a COPY instruction
+// in a later stage.
+// Each stage has its own base image, so it starts with its own configuration
+// and set of volumes.
+// If we're naming the result of the build, only the last stage will apply that
+// name to the image that it produces.
+type StageExecutor struct {
+ ctx context.Context
+ executor *Executor
+ log func(format string, args ...interface{})
+ index int
+ stages imagebuilder.Stages
+ name string
+ builder *buildah.Builder
+ preserved int
+ volumes imagebuilder.VolumeSet
+ volumeCache map[string]string
+ volumeCacheInfo map[string]os.FileInfo
+ mountPoint string
+ output string
+ containerIDs []string
+ stage *imagebuilder.Stage
+ argsFromContainerfile []string
+}
+
+// Preserve informs the stage executor that from this point on, it needs to
+// ensure that only COPY and ADD instructions can modify the contents of this
+// directory or anything below it.
+// The StageExecutor handles this by caching the contents of directories which
+// have been marked this way before executing a RUN instruction, invalidating
+// that cache when an ADD or COPY instruction sets any location under the
+// directory as the destination, and using the cache to reset the contents of
+// the directory tree after processing each RUN instruction.
+// It would be simpler if we could just mark the directory as a read-only bind
+// mount of itself during Run(), but the directory is expected to remain
+// writeable while the RUN instruction is being handled, even if any changes
+// made within the directory are ultimately discarded.
+func (s *StageExecutor) Preserve(path string) error {
+ logrus.Debugf("PRESERVE %q", path)
+ if s.volumes.Covers(path) {
+ // This path is already a subdirectory of a volume path that
+ // we're already preserving, so there's nothing new to be done
+ // except ensure that it exists.
+ createdDirPerms := os.FileMode(0755)
+ if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
+ return errors.Wrapf(err, "error ensuring volume path exists")
+ }
+ if err := s.volumeCacheInvalidate(path); err != nil {
+ return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path))
+ }
+ return nil
+ }
+ // Figure out where the cache for this volume would be stored.
+ s.preserved++
+ cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
+ if err != nil {
+ return errors.Errorf("unable to locate temporary directory for container")
+ }
+ cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
+ // Save info about the top level of the location that we'll be archiving.
+ var archivedPath string + + // Try and resolve the symlink (if one exists) + // Set archivedPath and path based on whether a symlink is found or not + if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil { + symLink, err := filepath.Rel(s.mountPoint, evaluated) + if err != nil { + return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint) + } + if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) { + return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint) + } + archivedPath = evaluated + path = string(os.PathSeparator) + symLink + } else { + return errors.Wrapf(err, "error evaluating path %q", path) + } + + st, err := os.Stat(archivedPath) + if os.IsNotExist(err) { + createdDirPerms := os.FileMode(0755) + if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { + return errors.Wrapf(err, "error ensuring volume path exists") + } + st, err = os.Stat(archivedPath) + } + if err != nil { + logrus.Debugf("error reading info about %q: %v", archivedPath, err) + return err + } + s.volumeCacheInfo[path] = st + if !s.volumes.Add(path) { + // This path is not a subdirectory of a volume path that we're + // already preserving, so adding it to the list should work. + return errors.Errorf("error adding %q to the volume cache", path) + } + s.volumeCache[path] = cacheFile + // Now prune cache files for volumes that are now supplanted by this one. + removed := []string{} + for cachedPath := range s.volumeCache { + // Walk our list of cached volumes, and check that they're + // still in the list of locations that we need to cache. + found := false + for _, volume := range s.volumes { + if volume == cachedPath { + // We need to keep this volume's cache. + found = true + break + } + } + if !found { + // We don't need to keep this volume's cache. Make a + // note to remove it. + removed = append(removed, cachedPath) + } + } + // Actually remove the caches that we decided to remove. + for _, cachedPath := range removed { + archivedPath := filepath.Join(s.mountPoint, cachedPath) + logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath]) + if err := os.Remove(s.volumeCache[cachedPath]); err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + delete(s.volumeCache, cachedPath) + } + return nil +} + +// Remove any volume cache item which will need to be re-saved because we're +// writing to part of it. +func (s *StageExecutor) volumeCacheInvalidate(path string) error { + invalidated := []string{} + for cachedPath := range s.volumeCache { + if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) { + invalidated = append(invalidated, cachedPath) + } + } + for _, cachedPath := range invalidated { + if err := os.Remove(s.volumeCache[cachedPath]); err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + archivedPath := filepath.Join(s.mountPoint, cachedPath) + logrus.Debugf("invalidated volume cache %q for %q from %q", archivedPath, path, s.volumeCache[cachedPath]) + } + return nil +} + +// Save the contents of each of the executor's list of volumes for which we +// don't already have a cache file. 
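The invalidation test in volumeCacheInvalidate above is a plain path-prefix check: a write invalidates a volume's cache only when it lands strictly below that volume's root. A minimal standalone restatement (the helper name and demo paths are ours, not buildah's):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// writeInvalidates reports whether a write landing at path must invalidate
// the cache kept for the volume rooted at cachedPath, mirroring the
// HasPrefix test in volumeCacheInvalidate above.
func writeInvalidates(path, cachedPath string) bool {
	return strings.HasPrefix(path, cachedPath+string(os.PathSeparator))
}

func main() {
	fmt.Println(writeInvalidates("/var/lib/app/data", "/var/lib")) // true
	fmt.Println(writeInvalidates("/var/library", "/var/lib"))      // false
}
```

Appending the separator before comparing is what keeps sibling paths such as /var/library from matching. volumeCacheSaveVFS, next, writes the per-volume tar snapshots that the restore path later unpacks.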
+func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) { + for cachedPath, cacheFile := range s.volumeCache { + archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "error evaluating volume path") + } + relativePath, err := filepath.Rel(s.mountPoint, archivedPath) + if err != nil { + return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint) + } + if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) { + return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint) + } + _, err = os.Stat(cacheFile) + if err == nil { + logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile) + continue + } + if !os.IsNotExist(err) { + return nil, err + } + createdDirPerms := os.FileMode(0755) + if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { + return nil, errors.Wrapf(err, "error ensuring volume path exists") + } + logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile) + cache, err := os.Create(cacheFile) + if err != nil { + return nil, err + } + defer cache.Close() + rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint) + if err != nil { + return nil, errors.Wrapf(err, "error archiving %q", archivedPath) + } + defer rc.Close() + _, err = io.Copy(cache, rc) + if err != nil { + return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile) + } + mount := specs.Mount{ + Source: archivedPath, + Destination: string(os.PathSeparator) + relativePath, + Type: "bind", + Options: []string{"private"}, + } + mounts = append(mounts, mount) + } + return nil, nil +} + +// Restore the contents of each of the executor's list of volumes. +func (s *StageExecutor) volumeCacheRestoreVFS() (err error) { + for cachedPath, cacheFile := range s.volumeCache { + archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{}) + if err != nil { + return errors.Wrapf(err, "error evaluating volume path") + } + logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile) + cache, err := os.Open(cacheFile) + if err != nil { + return err + } + defer cache.Close() + if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil { + return err + } + createdDirPerms := os.FileMode(0755) + if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { + return err + } + err = chrootarchive.Untar(cache, archivedPath, nil) + if err != nil { + return errors.Wrapf(err, "error extracting archive at %q", archivedPath) + } + if st, ok := s.volumeCacheInfo[cachedPath]; ok { + if err := os.Chmod(archivedPath, st.Mode()); err != nil { + return err + } + uid := 0 + gid := 0 + if st.Sys() != nil { + uid = util.UID(st) + gid = util.GID(st) + } + if err := os.Chown(archivedPath, uid, gid); err != nil { + return err + } + if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil { + return err + } + } + } + return nil +} + +// Save the contents of each of the executor's list of volumes for which we +// don't already have a cache file. 
+func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) { + for cachedPath := range s.volumeCache { + err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "ensuring volume exists") + } + volumePath := filepath.Join(s.mountPoint, cachedPath) + mount := specs.Mount{ + Source: volumePath, + Destination: cachedPath, + Options: []string{"O", "private"}, + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +// Reset the contents of each of the executor's list of volumes. +func (s *StageExecutor) volumeCacheRestoreOverlay() error { + return nil +} + +// Save the contents of each of the executor's list of volumes for which we +// don't already have a cache file. +func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) { + switch s.executor.store.GraphDriverName() { + case "overlay": + return s.volumeCacheSaveOverlay() + } + return s.volumeCacheSaveVFS() +} + +// Reset the contents of each of the executor's list of volumes. +func (s *StageExecutor) volumeCacheRestore() error { + switch s.executor.store.GraphDriverName() { + case "overlay": + return s.volumeCacheRestoreOverlay() + } + return s.volumeCacheRestoreVFS() +} + +// Copy copies data into the working tree. The "Download" field is how +// imagebuilder tells us the instruction was "ADD" and not "COPY". +func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { + s.builder.ContentDigester.Restart() + for _, copy := range copies { + if copy.Download { + logrus.Debugf("ADD %#v, %#v", excludes, copy) + } else { + logrus.Debugf("COPY %#v, %#v", excludes, copy) + } + if err := s.volumeCacheInvalidate(copy.Dest); err != nil { + return err + } + var sources []string + // The From field says to read the content from another + // container. Update the ID mappings and + // all-content-comes-from-below-this-directory value. + var idMappingOptions *define.IDMappingOptions + var copyExcludes []string + stripSetuid := false + stripSetgid := false + preserveOwnership := false + contextDir := s.executor.contextDir + if len(copy.From) > 0 { + // If from has an argument within it, resolve it to its + // value. Otherwise just return the value found. + from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments()) + if fromErr != nil { + return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From) + } + if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil { + return err + } + if other, ok := s.executor.stages[from]; ok && other.index < s.index { + contextDir = other.mountPoint + idMappingOptions = &other.builder.IDMappingOptions + } else if builder, ok := s.executor.containerMap[copy.From]; ok { + contextDir = builder.MountPoint + idMappingOptions = &builder.IDMappingOptions + } else { + return errors.Errorf("the stage %q has not been built", copy.From) + } + preserveOwnership = true + copyExcludes = excludes + } else { + copyExcludes = append(s.executor.excludes, excludes...) + stripSetuid = true // did this change between 18.06 and 19.03? + stripSetgid = true // did this change between 18.06 and 19.03? + } + for _, src := range copy.Src { + if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + // Source is a URL, allowed for ADD but not COPY. 
+				if copy.Download {
+					sources = append(sources, src)
+				} else {
+					// returns an error to be compatible with docker
+					return errors.Errorf("source can't be a URL for COPY")
+				}
+			} else {
+				sources = append(sources, filepath.Join(contextDir, src))
+			}
+		}
+		options := buildah.AddAndCopyOptions{
+			Chmod:             copy.Chmod,
+			Chown:             copy.Chown,
+			PreserveOwnership: preserveOwnership,
+			ContextDir:        contextDir,
+			Excludes:          copyExcludes,
+			IgnoreFile:        s.executor.ignoreFile,
+			IDMappingOptions:  idMappingOptions,
+			StripSetuidBit:    stripSetuid,
+			StripSetgidBit:    stripSetgid,
+		}
+		if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// runStageMountPoints returns a map of StageName/ImageName to
+// internal.StageMountDetails for RunOpts when any --mount flag with a
+// from= option is provided. The stage can automatically clean up these
+// mounts when it is removed. It checks whether RUN contains `--mount`
+// with `from`; if so, it pre-mounts the named images or stages from the
+// executor for Run. Stages mounted here will be used by Run().
+func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
+	stageMountPoints := make(map[string]internal.StageMountDetails)
+	for _, flag := range mountList {
+		if strings.Contains(flag, "from") {
+			arr := strings.SplitN(flag, ",", 2)
+			if len(arr) < 2 {
+				return nil, errors.Errorf("Invalid --mount command: %s", flag)
+			}
+			tokens := strings.Split(arr[1], ",")
+			for _, val := range tokens {
+				kv := strings.SplitN(val, "=", 2)
+				switch kv[0] {
+				case "from":
+					if len(kv) == 1 {
+						return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
+					}
+					if kv[1] == "" {
+						return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
+					}
+					from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
+					if fromErr != nil {
+						return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
+					}
+					// If the source's name corresponds to the
+					// result of an earlier stage, wait for that
+					// stage to finish being built.
+					if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
+						return nil, err
+					}
+					if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+						stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint}
+						break
+					} else {
+						mountPoint, err := s.getImageRootfs(s.ctx, from)
+						if err != nil {
+							return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
+						}
+						stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
+						break
+					}
+				default:
+					continue
+				}
+			}
+		}
+	}
+	return stageMountPoints, nil
+}
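runStageMountPoints only needs the from= value out of each --mount flag; the rest of the flag is handled later by the run-mount machinery. A self-contained sketch of just that token walk, under the same assumption made by the code above that the first comma-separated field can be skipped (the helper name and demo flag are ours, not buildah's):

```go
package main

import (
	"fmt"
	"strings"
)

// extractFromTarget pulls the value of a from= token out of a single --mount
// flag value; it returns "" when the flag carries no from= token.
func extractFromTarget(flag string) (string, error) {
	if !strings.Contains(flag, "from") {
		return "", nil
	}
	arr := strings.SplitN(flag, ",", 2)
	if len(arr) < 2 {
		return "", fmt.Errorf("invalid --mount flag: %s", flag)
	}
	for _, token := range strings.Split(arr[1], ",") {
		kv := strings.SplitN(token, "=", 2)
		if kv[0] != "from" {
			continue
		}
		if len(kv) == 1 || kv[1] == "" {
			return "", fmt.Errorf("from= requires a stage or image name")
		}
		return kv[1], nil
	}
	return "", nil
}

func main() {
	target, err := extractFromTarget("type=bind,from=builder,source=/out,target=/in")
	if err != nil {
		panic(err)
	}
	fmt.Println(target) // builder
}
```

Each extracted name is then resolved against earlier stages before falling back to pulling it as an image.

+// Run executes a RUN instruction using the stage's current working container
+// as a root directory.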
+func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { + logrus.Debugf("RUN %#v, %#v", run, config) + stageMountPoints, err := s.runStageMountPoints(run.Mounts) + if err != nil { + return err + } + if s.builder == nil { + return errors.Errorf("no build container available") + } + stdin := s.executor.in + if stdin == nil { + devNull, err := os.Open(os.DevNull) + if err != nil { + return errors.Errorf("error opening %q for reading: %v", os.DevNull, err) + } + defer devNull.Close() + stdin = devNull + } + options := buildah.RunOptions{ + Logger: s.executor.logger, + Hostname: config.Hostname, + Runtime: s.executor.runtime, + Args: s.executor.runtimeArgs, + NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "", + Mounts: append([]Mount{}, s.executor.transientMounts...), + Env: config.Env, + User: config.User, + WorkingDir: config.WorkingDir, + Entrypoint: config.Entrypoint, + ContextDir: s.executor.contextDir, + Cmd: config.Cmd, + Stdin: stdin, + Stdout: s.executor.out, + Stderr: s.executor.err, + Quiet: s.executor.quiet, + NamespaceOptions: s.executor.namespaceOptions, + Terminal: buildah.WithoutTerminal, + Secrets: s.executor.secrets, + SSHSources: s.executor.sshsources, + RunMounts: run.Mounts, + StageMountPoints: stageMountPoints, + SystemContext: s.executor.systemContext, + } + if config.NetworkDisabled { + options.ConfigureNetwork = buildah.NetworkDisabled + } else { + options.ConfigureNetwork = buildah.NetworkEnabled + } + + args := run.Args + if run.Shell { + if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest { + args = append(config.Shell, args...) + } else { + args = append([]string{"/bin/sh", "-c"}, args...) + } + } + mounts, err := s.volumeCacheSave() + if err != nil { + return err + } + options.Mounts = append(options.Mounts, mounts...) + err = s.builder.Run(args, options) + if err2 := s.volumeCacheRestore(); err2 != nil { + if err == nil { + return err2 + } + } + return err +} + +// UnrecognizedInstruction is called when we encounter an instruction that the +// imagebuilder parser didn't understand. +func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { + errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command)) + err := fmt.Sprintf(errStr+"%#v", step) + if s.executor.ignoreUnrecognizedInstructions { + logrus.Debugf(err) + return nil + } + + switch logrus.GetLevel() { + case logrus.ErrorLevel: + s.executor.logger.Errorf(errStr) + case logrus.DebugLevel: + logrus.Debugf(err) + default: + s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step) + } + + return errors.Errorf(err) +} + +// prepare creates a working container based on the specified image, or if one +// isn't specified, the first argument passed to the first FROM instruction we +// can find in the stage's parsed tree. 
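Note how Run assembles argv for shell-form instructions: the image's configured SHELL is honored only for Docker-format images, and everything else falls back to the /bin/sh -c prefix. A compressed restatement of that branch (the defensive copy before appending is our choice; the code above appends to config.Shell in place):

```go
package main

import "fmt"

// shellWrap models the shell-form wrapping in Run: honor the configured
// SHELL for Docker-format images, otherwise prepend /bin/sh -c.
func shellWrap(configShell, args []string, dockerFormat bool) []string {
	if dockerFormat && len(configShell) > 0 {
		// Copy before appending so the caller's SHELL slice is not mutated.
		return append(append([]string{}, configShell...), args...)
	}
	return append([]string{"/bin/sh", "-c"}, args...)
}

func main() {
	fmt.Println(shellWrap(nil, []string{"make build"}, false))
	// [/bin/sh -c make build]
	fmt.Println(shellWrap([]string{"/bin/bash", "-o", "pipefail", "-c"}, []string{"make build"}, true))
	// [/bin/bash -o pipefail -c make build]
}
```

prepare, whose definition follows, is where each stage's working container gets created and mounted.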
+func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBConfig, rebase bool, pullPolicy define.PullPolicy) (builder *buildah.Builder, err error) {
+	stage := s.stage
+	ib := stage.Builder
+	node := stage.Node
+
+	if from == "" {
+		base, err := ib.From(node)
+		if err != nil {
+			logrus.Debugf("prepare(node.Children=%#v)", node.Children)
+			return nil, errors.Wrapf(err, "error determining starting point for build")
+		}
+		from = base
+	}
+	displayFrom := from
+
+	// stage.Name will be a numeric string for all stages without an "AS" clause
+	asImageName := stage.Name
+	if asImageName != "" {
+		if _, err := strconv.Atoi(asImageName); err != nil {
+			displayFrom = from + " AS " + asImageName
+		}
+	}
+
+	if initializeIBConfig && rebase {
+		logrus.Debugf("FROM %#v", displayFrom)
+		if !s.executor.quiet {
+			s.log("FROM %s", displayFrom)
+		}
+	}
+
+	builderSystemContext := s.executor.systemContext
+	// get platform string from stage
+	if stage.Builder.Platform != "" {
+		os, arch, variant, err := parse.Platform(stage.Builder.Platform)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform)
+		}
+		if arch != "" || variant != "" {
+			builderSystemContext.ArchitectureChoice = arch
+			builderSystemContext.VariantChoice = variant
+		}
+		if os != "" {
+			builderSystemContext.OSChoice = os
+		}
+	}
+
+	builderOptions := buildah.BuilderOptions{
+		Args:                  ib.Args,
+		FromImage:             from,
+		PullPolicy:            pullPolicy,
+		ContainerSuffix:       s.executor.containerSuffix,
+		Registry:              s.executor.registry,
+		BlobDirectory:         s.executor.blobDirectory,
+		SignaturePolicyPath:   s.executor.signaturePolicyPath,
+		ReportWriter:          s.executor.reportWriter,
+		SystemContext:         builderSystemContext,
+		Isolation:             s.executor.isolation,
+		NamespaceOptions:      s.executor.namespaceOptions,
+		ConfigureNetwork:      s.executor.configureNetwork,
+		CNIPluginPath:         s.executor.cniPluginPath,
+		CNIConfigDir:          s.executor.cniConfigDir,
+		NetworkInterface:      s.executor.networkInterface,
+		IDMappingOptions:      s.executor.idmappingOptions,
+		CommonBuildOpts:       s.executor.commonBuildOptions,
+		DefaultMountsFilePath: s.executor.defaultMountsFilePath,
+		Format:                s.executor.outputFormat,
+		Capabilities:          s.executor.capabilities,
+		Devices:               s.executor.devices,
+		MaxPullRetries:        s.executor.maxPullPushRetries,
+		PullRetryDelay:        s.executor.retryPullPushDelay,
+		OciDecryptConfig:      s.executor.ociDecryptConfig,
+		Logger:                s.executor.logger,
+		ProcessLabel:          s.executor.processLabel,
+		MountLabel:            s.executor.mountLabel,
+	}
+
+	builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating build container")
+	}
+
+	// If the executor's ProcessLabel and MountLabel are empty, this is the
+	// first stage, so share the first stage's ProcessLabel and MountLabel
+	// with all subsequent stages. Doing this ensures that one stage in the
+	// same build can mount another stage even if `selinux` is enabled.
+ + if s.executor.mountLabel == "" && s.executor.processLabel == "" { + s.executor.mountLabel = builder.MountLabel + s.executor.processLabel = builder.ProcessLabel + } + + if initializeIBConfig { + volumes := map[string]struct{}{} + for _, v := range builder.Volumes() { + volumes[v] = struct{}{} + } + ports := map[docker.Port]struct{}{} + for _, p := range builder.Ports() { + ports[docker.Port(p)] = struct{}{} + } + dConfig := docker.Config{ + Hostname: builder.Hostname(), + Domainname: builder.Domainname(), + User: builder.User(), + Env: builder.Env(), + Cmd: builder.Cmd(), + Image: from, + Volumes: volumes, + WorkingDir: builder.WorkDir(), + Entrypoint: builder.Entrypoint(), + Labels: builder.Labels(), + Shell: builder.Shell(), + StopSignal: builder.StopSignal(), + OnBuild: builder.OnBuild(), + ExposedPorts: ports, + } + var rootfs *docker.RootFS + if builder.Docker.RootFS != nil { + rootfs = &docker.RootFS{ + Type: builder.Docker.RootFS.Type, + } + for _, id := range builder.Docker.RootFS.DiffIDs { + rootfs.Layers = append(rootfs.Layers, id.String()) + } + } + dImage := docker.Image{ + Parent: builder.FromImage, + ContainerConfig: dConfig, + Container: builder.Container, + Author: builder.Maintainer(), + Architecture: builder.Architecture(), + RootFS: rootfs, + } + dImage.Config = &dImage.ContainerConfig + err = ib.FromImage(&dImage, node) + if err != nil { + if err2 := builder.Delete(); err2 != nil { + logrus.Debugf("error deleting container which we failed to update: %v", err2) + } + return nil, errors.Wrapf(err, "error updating build context") + } + } + mountPoint, err := builder.Mount(builder.MountLabel) + if err != nil { + if err2 := builder.Delete(); err2 != nil { + logrus.Debugf("error deleting container which we failed to mount: %v", err2) + } + return nil, errors.Wrapf(err, "error mounting new container") + } + if rebase { + // Make this our "current" working container. + s.mountPoint = mountPoint + s.builder = builder + } + logrus.Debugln("Container ID:", builder.ContainerID) + return builder, nil +} + +// Delete deletes the stage's working container, if we have one. +func (s *StageExecutor) Delete() (err error) { + if s.builder != nil { + err = s.builder.Delete() + s.builder = nil + } + return err +} + +// stepRequiresLayer indicates whether or not the step should be followed by +// committing a layer container when creating an intermediate image. +func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool { + switch strings.ToUpper(step.Command) { + case "ADD", "COPY", "RUN": + return true + } + return false +} + +// getImageRootfs checks for an image matching the passed-in name in local +// storage. If it isn't found, it pulls down a copy. Then, if we don't have a +// working container root filesystem based on the image, it creates one. Then +// it returns that root filesystem's location. +func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mountPoint string, err error) { + if builder, ok := s.executor.containerMap[image]; ok { + return builder.MountPoint, nil + } + builder, err := s.prepare(ctx, image, false, false, s.executor.pullPolicy) + if err != nil { + return "", err + } + s.executor.containerMap[image] = builder + return builder.MountPoint, nil +} + +// Execute runs each of the steps in the stage's parsed tree, in turn. 
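prepare's tail shows the working-container lifecycle the rest of the stage relies on: NewBuilder creates the container, Mount exposes its root filesystem, and Delete tears it down again. A compressed sketch of that lifecycle with the logging and config plumbing trimmed (the function name is ours; the buildah calls are the ones used above):

```go
package sketch

import (
	"context"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

// newWorkingContainer compresses the lifecycle that prepare and Delete
// manage: create a working container from a base image, mount its root
// filesystem, and tear the container down again if the mount fails.
func newWorkingContainer(ctx context.Context, store storage.Store, base string) (*buildah.Builder, string, error) {
	builder, err := buildah.NewBuilder(ctx, store, buildah.BuilderOptions{FromImage: base})
	if err != nil {
		return nil, "", err
	}
	mountPoint, err := builder.Mount(builder.MountLabel)
	if err != nil {
		_ = builder.Delete() // do not leak the container on failure
		return nil, "", err
	}
	return builder, mountPoint, nil
}
```

Execute, below, drives the per-instruction loop on top of containers created this way.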
+func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) { + var resourceUsage rusage.Rusage + stage := s.stage + ib := stage.Builder + checkForLayers := s.executor.layers && s.executor.useCache + moreStages := s.index < len(s.stages)-1 + lastStage := !moreStages + imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)]) + rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)]) + + // If the base image's name corresponds to the result of an earlier + // stage, make sure that stage has finished building an image, and + // substitute that image's ID for the base image's name here and force + // the pull policy to "never" to avoid triggering an error when it's + // set to "always", which doesn't make sense for image IDs. + // If not, then go on assuming that it's just a regular image that's + // either in local storage, or one that we have to pull from a + // registry, subject to the passed-in pull policy. + if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil { + return "", nil, err + } + pullPolicy := s.executor.pullPolicy + s.executor.stagesLock.Lock() + if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage { + base = stageImage + pullPolicy = define.PullNever + } + s.executor.stagesLock.Unlock() + + // Set things up so that we can log resource usage as we go. + logRusage := func() { + if rusage.Supported() { + usage, err := rusage.Get() + if err != nil { + fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err) + return + } + if s.executor.rusageLogFile != nil { + fmt.Fprintf(s.executor.rusageLogFile, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage))) + } + resourceUsage = usage + } + } + + // Start counting resource usage before we potentially pull a base image. + if rusage.Supported() { + if resourceUsage, err = rusage.Get(); err != nil { + return "", nil, err + } + // Log the final incremental resource usage counter before we return. + defer logRusage() + } + + // Create the (first) working container for this stage. Reinitializing + // the imagebuilder configuration may alter the list of steps we have, + // so take a snapshot of them *after* that. + if _, err := s.prepare(ctx, base, true, true, pullPolicy); err != nil { + return "", nil, err + } + children := stage.Node.Children + + // A helper function to only log "COMMIT" as an explicit step if it's + // the very last step of a (possibly multi-stage) build. + logCommit := func(output string, instruction int) { + moreInstructions := instruction < len(children)-1 + if moreInstructions || moreStages { + return + } + commitMessage := "COMMIT" + if output != "" { + commitMessage = fmt.Sprintf("%s %s", commitMessage, output) + } + logrus.Debugf(commitMessage) + if !s.executor.quiet { + s.log(commitMessage) + } + } + logCacheHit := func(cacheID string) { + if !s.executor.quiet { + cacheHitMessage := "--> Using cache" + fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID) + } + } + logImageID := func(imgID string) { + if len(imgID) > 11 { + imgID = imgID[0:11] + } + if s.executor.iidfile == "" { + fmt.Fprintf(s.executor.out, "--> %s\n", imgID) + } + } + + if len(children) == 0 { + // There are no steps. 
+ if s.builder.FromImageID == "" || s.executor.squash { + // We either don't have a base image, or we need to + // squash the contents of the base image. Whichever is + // the case, we need to commit() to create a new image. + logCommit(s.output, -1) + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output); err != nil { + return "", nil, errors.Wrapf(err, "error committing base container") + } + } else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 { + // The image would be modified by the labels passed + // via the command line, so we need to commit. + logCommit(s.output, -1) + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output); err != nil { + return "", nil, err + } + } else { + // We don't need to squash the base image, and the + // image wouldn't be modified by the command line + // options, so just reuse the base image. + logCommit(s.output, -1) + if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil { + return "", nil, err + } + } + logImageID(imgID) + } + + for i, node := range children { + logRusage() + moreInstructions := i < len(children)-1 + lastInstruction := !moreInstructions + // Resolve any arguments in this instruction. + step := ib.Step() + if err := step.Resolve(node); err != nil { + return "", nil, errors.Wrapf(err, "error resolving step %+v", *node) + } + logrus.Debugf("Parsed Step: %+v", *step) + if !s.executor.quiet { + s.log("%s", step.Original) + } + + // Check if there's a --from if the step command is COPY. + // Also check the chmod and the chown flags for validity. + for _, flag := range step.Flags { + command := strings.ToUpper(step.Command) + // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from=' + if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") { + return "", nil, errors.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") + } + if command == "ADD" && (flag == "--chmod" || flag == "--chown") { + return "", nil, errors.Errorf("ADD only supports the --chmod= and the --chown= flags") + } + if strings.Contains(flag, "--from") && command == "COPY" { + arr := strings.Split(flag, "=") + if len(arr) != 2 { + return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=", command) + } + // If arr[1] has an argument within it, resolve it to its + // value. Otherwise just return the value found. + from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments()) + if fromErr != nil { + return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1]) + } + // If the source's name corresponds to the + // result of an earlier stage, wait for that + // stage to finish being built. + if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil { + return "", nil, err + } + if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index { + break + } else if _, err = s.getImageRootfs(ctx, from); err != nil { + return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from) + } + break + } + } + + // Determine if there are any RUN instructions to be run after + // this step. If not, we won't have to bother preserving the + // contents of any volumes declared between now and when we + // finish. 
+ noRunsRemaining := false + if moreInstructions { + noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]}) + } + + // If we're doing a single-layer build, just process the + // instruction. + if !s.executor.layers { + err := ib.Run(step, s, noRunsRemaining) + if err != nil { + logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) + return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) + } + // In case we added content, retrieve its digest. + addedContentType, addedContentDigest := s.builder.ContentDigester.Digest() + addedContentSummary := addedContentType + if addedContentDigest != "" { + if addedContentSummary != "" { + addedContentSummary = addedContentSummary + ":" + } + addedContentSummary = addedContentSummary + addedContentDigest.Encoded() + logrus.Debugf("added content %s", addedContentSummary) + } + if moreInstructions { + // There are still more instructions to process + // for this stage. Make a note of the + // instruction in the history that we'll write + // for the image when we eventually commit it. + timestamp := time.Now().UTC() + if s.executor.timestamp != nil { + timestamp = *s.executor.timestamp + } + s.builder.AddPrependedEmptyLayer(×tamp, s.getCreatedBy(node, addedContentSummary), "", "") + continue + } else { + // This is the last instruction for this stage, + // so we should commit this container to create + // an image, but only if it's the last stage, + // or if it's used as the basis for a later + // stage. + if lastStage || imageIsUsedLater { + logCommit(s.output, i) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output) + if err != nil { + return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) + } + logImageID(imgID) + } else { + imgID = "" + } + break + } + } + + // We're in a multi-layered build. + var ( + commitName string + cacheID string + err error + rebase bool + addedContentSummary string + ) + + // If we have to commit for this instruction, only assign the + // stage's configured output name to the last layer. + if lastInstruction { + commitName = s.output + } + + // Check if there's already an image based on our parent that + // has the same change that we're about to make, so far as we + // can tell. + // Only do this if the step we are on is not an ARG step, + // we need to call ib.Run() to correctly put the args together before + // determining if a cached layer with the same build args already exists + // and that is done in the if block below. + if checkForLayers && step.Command != "arg" { + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") + } + } + + // If we didn't find a cache entry, or we need to add content + // to find the digest of the content to check for a cached + // image, run the step so that we can check if the result + // matches a cache. + if cacheID == "" { + // Process the instruction directly. + if err = ib.Run(step, s, noRunsRemaining); err != nil { + logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) + return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) + } + + // In case we added content, retrieve its digest. 
+ addedContentType, addedContentDigest := s.builder.ContentDigester.Digest() + addedContentSummary = addedContentType + if addedContentDigest != "" { + if addedContentSummary != "" { + addedContentSummary = addedContentSummary + ":" + } + addedContentSummary = addedContentSummary + addedContentDigest.Encoded() + logrus.Debugf("added content %s", addedContentSummary) + } + + // Check if there's already an image based on our parent that + // has the same change that we just made. + if checkForLayers { + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") + } + } + } else { + // If the instruction would affect our configuration, + // process the configuration change so that, if we fall + // off the cache path, the filesystem changes from the + // last cache image will be all that we need, since we + // still don't want to restart using the image's + // configuration blob. + if !s.stepRequiresLayer(step) { + err := ib.Run(step, s, noRunsRemaining) + if err != nil { + logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) + return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) + } + } + } + + // We want to save history for other layers during a squashed build. + // Toggle flag allows executor to treat other instruction and layers + // as regular builds and only perform squashing at last + squashToggle := false + // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found. + // So only perform commit if its the lastInstruction of lastStage. + if cacheID != "" { + logCacheHit(cacheID) + // A suitable cached image was found, so we can just + // reuse it. If we need to add a name to the resulting + // image because it's the last step in this stage, add + // the name to the image. + imgID = cacheID + if commitName != "" { + logCommit(commitName, i) + if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil { + return "", nil, err + } + } + } else { + if s.executor.squash { + // We want to save history for other layers during a squashed build. + // squashToggle flag allows executor to treat other instruction and layers + // as regular builds and only perform squashing at last + s.executor.squash = false + squashToggle = true + } + // We're not going to find any more cache hits, so we + // can stop looking for them. + checkForLayers = false + // Create a new image, maybe with a new layer, with the + // name for this stage if it's the last instruction. 
+			logCommit(s.output, i)
+			imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
+			if err != nil {
+				return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+			}
+		}
+
+		// Perform the final squash for this build, since we are on the
+		// last instruction of the last stage.
+		if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
+			s.executor.squash = true
+			imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
+			if err != nil {
+				return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
+			}
+		}
+
+		logImageID(imgID)
+
+		// Update our working container to be based off of the cached
+		// image, if we might need to use it as a basis for the next
+		// instruction, or if we need the root filesystem to match the
+		// image contents for the sake of a later stage that wants to
+		// copy content from it.
+		rebase = moreInstructions || rootfsIsUsedLater
+
+		if rebase {
+			// Since we either committed the working container or
+			// are about to replace it with one based on a cached
+			// image, add the current working container's ID to the
+			// list of successful intermediate containers that
+			// we'll clean up later.
+			s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
+
+			// Prepare for the next step or subsequent phases by
+			// creating a new working container with the
+			// just-committed or updated cached image as its new
+			// base image.
+			// Enforce pull "never" since we already have an image
+			// ID that we really should not be pulling anymore (see
+			// containers/podman/issues/10307).
+			if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil {
+				return "", nil, errors.Wrap(err, "error preparing container for next step")
+			}
+		}
+	}
+	return imgID, ref, nil
+}
+
+func historyEntriesEqual(base, derived v1.History) bool {
+	if base.CreatedBy != derived.CreatedBy {
+		return false
+	}
+	if base.Comment != derived.Comment {
+		return false
+	}
+	if base.Author != derived.Author {
+		return false
+	}
+	if base.EmptyLayer != derived.EmptyLayer {
+		return false
+	}
+	if base.Created != nil && derived.Created == nil {
+		return false
+	}
+	if base.Created == nil && derived.Created != nil {
+		return false
+	}
+	if base.Created != nil && derived.Created != nil && !base.Created.Equal(*derived.Created) {
+		return false
+	}
+	return true
+}
+
+// historyAndDiffIDsMatch returns true if a candidate history matches the
+// history of our base image (if we have one), plus the current instruction,
+// and if the images' lists of diff IDs match for the part of the history
+// that we're comparing.
+// Used to verify whether a cache of the intermediate image exists and whether
+// to run the build again.
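The cache check boils down to layer arithmetic: a matching candidate must carry one diff ID per non-empty history entry of the base image, plus one more when the current step adds a layer. A small restatement of just that counting (the function name and demo history are ours; v1 is the OCI image-spec package already imported above):

```go
package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// expectedDiffIDCount restates the layer accounting used by
// historyAndDiffIDsMatch: one diff ID per non-empty history entry of the
// base, plus one when the step being checked adds a layer (RUN/ADD/COPY).
func expectedDiffIDCount(baseHistory []v1.History, buildAddsLayer bool) int {
	n := 0
	for _, h := range baseHistory {
		if !h.EmptyLayer {
			n++
		}
	}
	if buildAddsLayer {
		n++
	}
	return n
}

func main() {
	base := []v1.History{
		{CreatedBy: "/bin/sh -c #(nop) COPY file:abc in /app", EmptyLayer: false},
		{CreatedBy: "/bin/sh -c #(nop) ENV PATH=/usr/bin", EmptyLayer: true},
	}
	fmt.Println(expectedDiffIDCount(base, true)) // 2: one base layer plus the new one
}
```

The full comparison, which also checks each history entry and the shared diff IDs, follows.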
+func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool { + // our history should be as long as the base's, plus one entry for what + // we're doing + if len(history) != len(baseHistory)+1 { + return false + } + // check that each entry in the base history corresponds to an entry in + // our history, and count how many of them add a layer diff + expectedDiffIDs := 0 + for i := range baseHistory { + if !historyEntriesEqual(baseHistory[i], history[i]) { + return false + } + if !baseHistory[i].EmptyLayer { + expectedDiffIDs++ + } + } + if len(baseDiffIDs) != expectedDiffIDs { + return false + } + if buildAddsLayer { + // we're adding a layer, so we should have exactly one more + // layer than the base image + if len(diffIDs) != expectedDiffIDs+1 { + return false + } + } else { + // we're not adding a layer, so we should have exactly the same + // layers as the base image + if len(diffIDs) != expectedDiffIDs { + return false + } + } + // compare the diffs for the layers that we should have in common + for i := range baseDiffIDs { + if diffIDs[i] != baseDiffIDs[i] { + return false + } + } + return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary) +} + +// getCreatedBy returns the command the image at node will be created by. If +// the passed-in CompositeDigester is not nil, it is assumed to have the digest +// information for the content if the node is ADD or COPY. +func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string { + if node == nil { + return "/bin/sh" + } + switch strings.ToUpper(node.Value) { + case "ARG": + for _, variable := range strings.Fields(node.Original) { + if variable != "ARG" { + s.argsFromContainerfile = append(s.argsFromContainerfile, variable) + } + } + buildArgs := s.getBuildArgsKey() + return "/bin/sh -c #(nop) ARG " + buildArgs + case "RUN": + buildArgs := s.getBuildArgsResolvedForRun() + if buildArgs != "" { + return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] + } + return "/bin/sh -c " + node.Original[4:] + case "ADD", "COPY": + destination := node + for destination.Next != nil { + destination = destination.Next + } + return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " " + default: + return "/bin/sh -c #(nop) " + node.Original + } +} + +// getBuildArgs returns a string of the build-args specified during the build process +// it excludes any build-args that were not used in the build process +// values for args are overridden by the values specified using ENV. +// Reason: Values from ENV will always override values specified arg. 
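Concretely, the ENV-over-ARG precedence described above works like this stripped-down model, which ignores the proxy-variable allowlist handling (names and demo values are ours, not buildah's):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// resolveArgsForHistory records every allowed build arg as KEY=VALUE, but
// when the image's config already sets the same key via ENV, the ENV value
// wins, mirroring the precedence used below.
func resolveArgsForHistory(args, imageEnv map[string]string) string {
	var envs []string
	for key, value := range args {
		if envValue, inImage := imageEnv[key]; inImage {
			value = envValue
		}
		envs = append(envs, fmt.Sprintf("%s=%s", key, value))
	}
	sort.Strings(envs)
	return strings.Join(envs, " ")
}

func main() {
	args := map[string]string{"VERSION": "1.0", "GOFLAGS": "-mod=vendor"}
	imageEnv := map[string]string{"VERSION": "2.0"} // ENV set in the base image
	fmt.Println(resolveArgsForHistory(args, imageEnv))
	// GOFLAGS=-mod=vendor VERSION=2.0
}
```

The full resolver follows.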
+func (s *StageExecutor) getBuildArgsResolvedForRun() string {
+	var envs []string
+	configuredEnvs := make(map[string]string)
+	dockerConfig := s.stage.Builder.Config()
+
+	for _, env := range dockerConfig.Env {
+		splitv := strings.SplitN(env, "=", 2)
+		if len(splitv) == 2 {
+			configuredEnvs[splitv[0]] = splitv[1]
+		}
+	}
+
+	for key, value := range s.stage.Builder.Args {
+		if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+			// if the value was set in the image it is given higher
+			// priority, so embed that value into the build history
+			_, inImage := configuredEnvs[key]
+			if inImage {
+				envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
+			} else {
+				// By default everything must be added to history.
+				// The following variable is set to false only for special cases.
+				addToHistory := true
+
+				// The following value is being assigned from build-args;
+				// check if this key belongs to any of the predefined allowlist
+				// args, e.g. proxy variables, and if that arg is not manually
+				// set in the Containerfile/Dockerfile, then don't write its
+				// value to history.
+				// This behaviour ensures parity with docker/buildkit.
+				for _, variable := range config.ProxyEnv {
+					if key == variable {
+						// found in the predefined args, so don't add it to
+						// history unless the user declared it with an
+						// explicit `ARG `
+						addToHistory = false
+						for _, processedArg := range s.argsFromContainerfile {
+							if key == processedArg {
+								addToHistory = true
+							}
+						}
+					}
+				}
+				if addToHistory {
+					envs = append(envs, fmt.Sprintf("%s=%s", key, value))
+				}
+			}
+		}
+	}
+	sort.Strings(envs)
+	return strings.Join(envs, " ")
+}
+
+// getBuildArgsKey returns the set of build-arg keys that were specified
+// during the build process; it is used exclusively by the build history.
+func (s *StageExecutor) getBuildArgsKey() string {
+	var envs []string
+	for key := range s.stage.Builder.Args {
+		if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+			envs = append(envs, key)
+		}
+	}
+	sort.Strings(envs)
+	return strings.Join(envs, " ")
+}
+
+// tagExistingImage adds names to an image already in the store
+func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
+	// If we don't need to attach a name to the image, just return the cache ID.
+	if output == "" {
+		return cacheID, nil, nil
+	}
+
+	// Get the destination image reference.
+ dest, err := s.executor.resolveNameToImageRef(output) + if err != nil { + return "", nil, err + } + + policyContext, err := util.GetPolicyContext(s.executor.systemContext) + if err != nil { + return "", nil, err + } + defer func() { + if destroyErr := policyContext.Destroy(); destroyErr != nil { + if err == nil { + err = destroyErr + } else { + err = errors.Wrap(err, destroyErr.Error()) + } + } + }() + + // Look up the source image, expecting it to be in local storage + src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID) + if err != nil { + return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID) + } + options := cp.Options{ + RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image + } + manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options) + if err != nil { + return "", nil, errors.Wrapf(err, "error copying image %q", cacheID) + } + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID) + } + img, err := is.Transport.GetStoreImage(s.executor.store, dest) + if err != nil { + return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest)) + } + var ref reference.Canonical + if dref := dest.DockerReference(); dref != nil { + if ref, err = reference.WithDigest(dref, manifestDigest); err != nil { + return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest)) + } + } + return img.ID, ref, nil +} + +// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build. +// It verifies this by checking the parent of the top layer of the image and the history. +func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) { + // Get the list of images available in the image store + images, err := s.executor.store.Images() + if err != nil { + return "", errors.Wrap(err, "error getting image list from store") + } + var baseHistory []v1.History + var baseDiffIDs []digest.Digest + if s.builder.FromImageID != "" { + _, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) + if err != nil { + return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID) + } + } + for _, image := range images { + var imageTopLayer *storage.Layer + var imageParentLayerID string + if image.TopLayer != "" { + imageTopLayer, err = s.executor.store.Layer(image.TopLayer) + if err != nil { + return "", errors.Wrapf(err, "error getting top layer info") + } + // Figure out which layer from this image we should + // compare our container's base layer to. + imageParentLayerID = imageTopLayer.ID + // If we haven't added a layer here, then our base + // layer should be the same as the image's layer. If + // did add a layer, then our base layer should be the + // same as the parent of the image's layer. + if buildAddsLayer { + imageParentLayerID = imageTopLayer.Parent + } + } + // If the parent of the top layer of an image is equal to the current build image's top layer, + // it means that this image is potentially a cached intermediate image from a previous + // build. 
+ if s.builder.TopLayer != imageParentLayerID { + continue + } + // Next we double check that the history of this image is equivalent to the previous + // lines in the Dockerfile up till the point we are at in the build. + manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID) + if err != nil { + // It's possible that this image is for another architecture, which results + // in a custom-crafted error message that we'd have to use substring matching + // to recognize. Instead, ignore the image. + logrus.Debugf("error getting history of %q (%v), ignoring it", image.ID, err) + continue + } + // If this candidate isn't of the type that we're building, then it may have lost + // some format-specific information that a building-without-cache run wouldn't lose. + if manifestType != s.executor.outputFormat { + continue + } + // children + currNode is the point of the Dockerfile we are currently at. + if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) { + return image.ID, nil + } + } + return "", nil +} + +// commit writes the container's contents to an image, using a passed-in tag as +// the name if there is one, generating a unique ID-based one otherwise. +func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) { + ib := s.stage.Builder + var imageRef types.ImageReference + if output != "" { + imageRef2, err := s.executor.resolveNameToImageRef(output) + if err != nil { + return "", nil, err + } + imageRef = imageRef2 + } + + if ib.Author != "" { + s.builder.SetMaintainer(ib.Author) + } + config := ib.Config() + if createdBy != "" { + s.builder.SetCreatedBy(createdBy) + } + s.builder.SetHostname(config.Hostname) + s.builder.SetDomainname(config.Domainname) + if s.executor.architecture != "" { + s.builder.SetArchitecture(s.executor.architecture) + } + if s.executor.os != "" { + s.builder.SetOS(s.executor.os) + } + s.builder.SetUser(config.User) + s.builder.ClearPorts() + for p := range config.ExposedPorts { + s.builder.SetPort(string(p)) + } + for _, envSpec := range config.Env { + spec := strings.SplitN(envSpec, "=", 2) + s.builder.SetEnv(spec[0], spec[1]) + } + s.builder.SetCmd(config.Cmd) + s.builder.ClearVolumes() + for v := range config.Volumes { + s.builder.AddVolume(v) + } + s.builder.ClearOnBuild() + for _, onBuildSpec := range config.OnBuild { + s.builder.SetOnBuild(onBuildSpec) + } + s.builder.SetWorkDir(config.WorkingDir) + s.builder.SetEntrypoint(config.Entrypoint) + s.builder.SetShell(config.Shell) + s.builder.SetStopSignal(config.StopSignal) + if config.Healthcheck != nil { + s.builder.SetHealthcheck(&buildahdocker.HealthConfig{ + Test: append([]string{}, config.Healthcheck.Test...), + Interval: config.Healthcheck.Interval, + Timeout: config.Healthcheck.Timeout, + StartPeriod: config.Healthcheck.StartPeriod, + Retries: config.Healthcheck.Retries, + }) + } else { + s.builder.SetHealthcheck(nil) + } + s.builder.ClearLabels() + + for k, v := range config.Labels { + s.builder.SetLabel(k, v) + } + for _, labelSpec := range s.executor.labels { + label := strings.SplitN(labelSpec, "=", 2) + if len(label) > 1 { + s.builder.SetLabel(label[0], label[1]) + } else { + s.builder.SetLabel(label[0], "") + } + } + s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) + for _, annotationSpec := range s.executor.annotations { + annotation := strings.SplitN(annotationSpec, "=", 2) + if 
len(annotation) > 1 { + s.builder.SetAnnotation(annotation[0], annotation[1]) + } else { + s.builder.SetAnnotation(annotation[0], "") + } + } + if imageRef != nil { + logName := transports.ImageName(imageRef) + logrus.Debugf("COMMIT %q", logName) + } else { + logrus.Debugf("COMMIT") + } + writer := s.executor.reportWriter + if s.executor.layers || !s.executor.useCache { + writer = nil + } + options := buildah.CommitOptions{ + Compression: s.executor.compression, + SignaturePolicyPath: s.executor.signaturePolicyPath, + ReportWriter: writer, + PreferredManifestType: s.executor.outputFormat, + SystemContext: s.executor.systemContext, + Squash: s.executor.squash, + EmptyLayer: emptyLayer, + BlobDirectory: s.executor.blobDirectory, + SignBy: s.executor.signBy, + MaxRetries: s.executor.maxPullPushRetries, + RetryDelay: s.executor.retryPullPushDelay, + HistoryTimestamp: s.executor.timestamp, + Manifest: s.executor.manifest, + UnsetEnvs: s.executor.unsetEnvs, + } + imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) + if err != nil { + return "", nil, err + } + var ref reference.Canonical + if imageRef != nil { + if dref := imageRef.DockerReference(); dref != nil { + if ref, err = reference.WithDigest(dref, manifestDigest); err != nil { + return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID) + } + } + } + return imgID, ref, nil +} + +func (s *StageExecutor) EnsureContainerPath(path string) error { + return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{}) +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go new file mode 100644 index 00000000000..598e407a86d --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -0,0 +1,12 @@ +package imagebuildah + +import ( + "github.com/containers/buildah" +) + +// InitReexec is a wrapper for buildah.InitReexec(). It should be called at +// the start of main(), and if it returns true, main() should return +// successfully immediately. 
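A typical call site for the wrapper defined below, mirroring the contract spelled out in the comment above (a minimal sketch, not part of the vendored source):

```go
package main

import (
	"os"

	"github.com/containers/buildah/imagebuildah"
)

func main() {
	// When the process was re-executed to perform an internal step,
	// InitReexec does that work and returns true; the outer main must
	// then return immediately without running the normal CLI.
	if imagebuildah.InitReexec() {
		return
	}
	// ... normal command-line handling would continue here ...
	os.Exit(0)
}
```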
+func InitReexec() bool { + return buildah.InitReexec() +} diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go new file mode 100644 index 00000000000..a4fcee47686 --- /dev/null +++ b/vendor/github.com/containers/buildah/import.go @@ -0,0 +1,174 @@ +package buildah + +import ( + "context" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/docker" + "github.com/containers/buildah/util" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) { + if imageID == "" { + return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage") + } + + storeopts, err := storage.DefaultStoreOptions(false, 0) + if err != nil { + return nil, err + } + uidmap, gidmap := convertStorageIDMaps(storeopts.UIDMap, storeopts.GIDMap) + + ref, err := is.Transport.ParseStoreReference(store, imageID) + if err != nil { + return nil, errors.Wrapf(err, "no such image %q", imageID) + } + src, err := ref.NewImageSource(ctx, systemContext) + if err != nil { + return nil, errors.Wrapf(err, "error instantiating image source") + } + defer src.Close() + + imageDigest := "" + manifestBytes, manifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref)) + } + if manifestDigest, err := manifest.Digest(manifestBytes); err == nil { + imageDigest = manifestDigest.String() + } + + var instanceDigest *digest.Digest + if manifest.MIMETypeIsMultiImage(manifestType) { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref)) + } + instance, err := list.ChooseInstance(systemContext) + if err != nil { + return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref)) + } + instanceDigest = &instance + } + + image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest)) + if err != nil { + return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest) + } + + imageName := "" + if img, err3 := store.Image(imageID); err3 == nil { + if len(img.Names) > 0 { + imageName = img.Names[0] + } + if img.TopLayer != "" { + layer, err4 := store.Layer(img.TopLayer) + if err4 != nil { + return nil, errors.Wrapf(err4, "error reading information about image's top layer") + } + uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap) + } + } + + defaultNamespaceOptions, err := DefaultNamespaceOptions() + if err != nil { + return nil, err + } + + netInt, err := getNetworkInterface(store, "", "") + if err != nil { + return nil, err + } + + builder := &Builder{ + store: store, + Type: containerType, + FromImage: imageName, + FromImageID: imageID, + FromImageDigest: imageDigest, + Container: containerName, + ContainerID: containerID, + ImageAnnotations: map[string]string{}, + ImageCreatedBy: "", + 
NamespaceOptions: defaultNamespaceOptions, + IDMappingOptions: define.IDMappingOptions{ + HostUIDMapping: len(uidmap) == 0, + HostGIDMapping: len(uidmap) == 0, + UIDMap: uidmap, + GIDMap: gidmap, + }, + NetworkInterface: netInt, + } + + if err := builder.initConfig(ctx, image, systemContext); err != nil { + return nil, errors.Wrapf(err, "error preparing image configuration") + } + + return builder, nil +} + +func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) { + if options.Container == "" { + return nil, errors.Errorf("container name must be specified") + } + + c, err := store.Container(options.Container) + if err != nil { + return nil, err + } + + systemContext := getSystemContext(store, &types.SystemContext{}, options.SignaturePolicyPath) + + builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID) + if err != nil { + return nil, err + } + + if builder.FromImageID != "" { + if d, err2 := digest.Parse(builder.FromImageID); err2 == nil { + builder.Docker.Parent = docker.ID(d) + } else { + builder.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), builder.FromImageID)) + } + } + if builder.FromImage != "" { + builder.Docker.ContainerConfig.Image = builder.FromImage + } + builder.IDMappingOptions.UIDMap, builder.IDMappingOptions.GIDMap = convertStorageIDMaps(c.UIDMap, c.GIDMap) + + err = builder.Save() + if err != nil { + return nil, errors.Wrapf(err, "error saving builder state") + } + + return builder, nil +} + +func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) { + if options.Image == "" { + return nil, errors.Errorf("image name must be specified") + } + + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) + + _, img, err := util.FindImage(store, "", systemContext, options.Image) + if err != nil { + return nil, errors.Wrapf(err, "importing settings") + } + + builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "") + if err != nil { + return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image) + } + + builder.setupLogger() + return builder, nil +} diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go new file mode 100644 index 00000000000..12b69c9ba60 --- /dev/null +++ b/vendor/github.com/containers/buildah/info.go @@ -0,0 +1,222 @@ +package buildah + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "runtime" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/containers/buildah/util" + "github.com/containers/storage" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// InfoData holds the info type, i.e store, host etc and the data for each type +type InfoData struct { + Type string + Data map[string]interface{} +} + +// Info returns the store and host information +func Info(store storage.Store) ([]InfoData, error) { + info := []InfoData{} + // get host information + hostInfo := hostInfo() + info = append(info, InfoData{Type: "host", Data: hostInfo}) + + // get store information + storeInfo, err := storeInfo(store) + if err != nil { + logrus.Error(err, "error getting store info") + } + info = 
append(info, InfoData{Type: "store", Data: storeInfo}) + return info, nil +} + +func hostInfo() map[string]interface{} { + info := map[string]interface{}{} + ps := platforms.Normalize(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH}) + info["os"] = ps.OS + info["arch"] = ps.Architecture + info["variant"] = ps.Variant + info["cpus"] = runtime.NumCPU() + info["rootless"] = unshare.IsRootless() + + unified, err := util.IsCgroup2UnifiedMode() + if err != nil { + logrus.Error(err, "err reading cgroups mode") + } + cgroupVersion := "v1" + ociruntime := util.Runtime() + if unified { + cgroupVersion = "v2" + } + info["CgroupVersion"] = cgroupVersion + info["OCIRuntime"] = ociruntime + + mi, err := system.ReadMemInfo() + if err != nil { + logrus.Error(err, "err reading memory info") + info["MemTotal"] = "" + info["MemFree"] = "" + info["SwapTotal"] = "" + info["SwapFree"] = "" + } else { + info["MemTotal"] = mi.MemTotal + info["MemFree"] = mi.MemFree + info["SwapTotal"] = mi.SwapTotal + info["SwapFree"] = mi.SwapFree + } + hostDistributionInfo := getHostDistributionInfo() + info["Distribution"] = map[string]interface{}{ + "distribution": hostDistributionInfo["Distribution"], + "version": hostDistributionInfo["Version"], + } + + kv, err := readKernelVersion() + if err != nil { + logrus.Error(err, "error reading kernel version") + } + info["kernel"] = kv + + up, err := readUptime() + if err != nil { + logrus.Error(err, "error reading up time") + } + // Convert uptime in seconds to a human-readable format + upSeconds := up + "s" + upDuration, err := time.ParseDuration(upSeconds) + if err != nil { + logrus.Error(err, "error parsing system uptime") + } + + hoursFound := false + var timeBuffer bytes.Buffer + var hoursBuffer bytes.Buffer + for _, elem := range upDuration.String() { + timeBuffer.WriteRune(elem) + if elem == 'h' || elem == 'm' { + timeBuffer.WriteRune(' ') + if elem == 'h' { + hoursFound = true + } + } + if !hoursFound { + hoursBuffer.WriteRune(elem) + } + } + + info["uptime"] = timeBuffer.String() + if hoursFound { + hours, err := strconv.ParseFloat(hoursBuffer.String(), 64) + if err == nil { + days := hours / 24 + info["uptime"] = fmt.Sprintf("%s (Approximately %.2f days)", info["uptime"], days) + } + } + + host, err := os.Hostname() + if err != nil { + logrus.Error(err, "error getting hostname") + } + info["hostname"] = host + + return info +} + +// top-level "store" info +func storeInfo(store storage.Store) (map[string]interface{}, error) { + // lets say storage driver in use, number of images, number of containers + info := map[string]interface{}{} + info["GraphRoot"] = store.GraphRoot() + info["RunRoot"] = store.RunRoot() + info["GraphDriverName"] = store.GraphDriverName() + info["GraphOptions"] = store.GraphOptions() + statusPairs, err := store.Status() + if err != nil { + return nil, err + } + status := map[string]string{} + for _, pair := range statusPairs { + status[pair[0]] = pair[1] + } + info["GraphStatus"] = status + images, err := store.Images() + if err != nil { + logrus.Error(err, "error getting number of images") + } + info["ImageStore"] = map[string]interface{}{ + "number": len(images), + } + + containers, err := store.Containers() + if err != nil { + logrus.Error(err, "error getting number of containers") + } + info["ContainerStore"] = map[string]interface{}{ + "number": len(containers), + } + + return info, nil +} + +func readKernelVersion() (string, error) { + buf, err := ioutil.ReadFile("/proc/version") + if err != nil { + return "", err + } + f := 
bytes.Fields(buf)
+	if len(f) < 3 {
+		return string(bytes.TrimSpace(buf)), nil
+	}
+	return string(f[2]), nil
+}
+
+func readUptime() (string, error) {
+	buf, err := ioutil.ReadFile("/proc/uptime")
+	if err != nil {
+		return "", err
+	}
+	f := bytes.Fields(buf)
+	if len(f) < 1 {
+		return "", errors.Errorf("invalid uptime")
+	}
+	return string(f[0]), nil
+}
+
+// getHostDistributionInfo returns a map containing the host's distribution and version
+func getHostDistributionInfo() map[string]string {
+	dist := make(map[string]string)
+
+	// Populate values in case we cannot find the values
+	// or the file
+	dist["Distribution"] = "unknown"
+	dist["Version"] = "unknown"
+
+	f, err := os.Open("/etc/os-release")
+	if err != nil {
+		return dist
+	}
+	defer f.Close()
+
+	l := bufio.NewScanner(f)
+	for l.Scan() {
+		if strings.HasPrefix(l.Text(), "ID=") {
+			dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=")
+		}
+		if strings.HasPrefix(l.Text(), "VERSION_ID=") {
+			dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"")
+		}
+	}
+	return dist
+}
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
new file mode 100644
index 00000000000..2a09821f3c1
--- /dev/null
+++ b/vendor/github.com/containers/buildah/install.md
@@ -0,0 +1,481 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Installation Instructions
+
+## Installing packaged versions of buildah
+
+### [Arch Linux](https://www.archlinux.org)
+
+```bash
+sudo pacman -S buildah
+```
+
+### [CentOS](https://www.centos.org)
+
+Buildah is available in the default Extras repos for CentOS 7 and in
+the AppStream repo for CentOS 8 and Stream; however, the available version often
+lags the upstream release.
+
+```bash
+sudo yum -y install buildah
+```
+
+The [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable)
+provides updated packages for CentOS 8 and CentOS 8 Stream.
+
+```bash
+# CentOS 8
+sudo dnf -y module disable container-tools
+sudo dnf -y install 'dnf-command(copr)'
+sudo dnf -y copr enable rhcontainerbot/container-selinux
+sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo
+# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc
+sudo dnf -y --refresh install runc
+# Install Buildah
+sudo dnf -y --refresh install buildah
+
+# CentOS 8 Stream
+sudo dnf -y module disable container-tools
+sudo dnf -y install 'dnf-command(copr)'
+sudo dnf -y copr enable rhcontainerbot/container-selinux
+sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
+# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc
+sudo dnf -y --refresh install runc
+# Install Buildah
+sudo dnf -y --refresh install buildah
+```
+
+
+### [Debian](https://debian.org)
+
+The buildah package is available in
+the [Bullseye (testing) branch](https://packages.debian.org/bullseye/buildah), which
+will be the next stable release (Debian 11), as well as in Debian Unstable/Sid.
+
+```bash
+# Debian Testing/Bullseye or Unstable/Sid
+sudo apt-get update
+sudo apt-get -y install buildah
+```
+
+
+### [Fedora](https://www.fedoraproject.org)
+
+```bash
+sudo dnf -y install buildah
+```
+
+### [Fedora SilverBlue](https://silverblue.fedoraproject.org)
+
+Installed by default.
+
+### [Fedora CoreOS](https://coreos.fedoraproject.org)
+
+Not available in the default installation; it must be added via package layering:
+
+```bash
+rpm-ostree install buildah
+```
+
+Note: [`podman`](https://podman.io) build is available by default.
+
+### [Gentoo](https://www.gentoo.org)
+
+```bash
+sudo emerge app-emulation/libpod
+```
+
+### [openSUSE](https://www.opensuse.org)
+
+```bash
+sudo zypper install buildah
+```
+
+### [openSUSE Kubic](https://kubic.opensuse.org)
+
+```bash
+transactional-update pkg in buildah
+```
+
+### [RHEL7](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)
+
+Subscribe, then enable the Extras channel and install buildah.
+
+```bash
+sudo subscription-manager repos --enable=rhel-7-server-extras-rpms
+sudo yum -y install buildah
+```
+
+### [Raspberry Pi OS arm64 (beta)](https://downloads.raspberrypi.org/raspios_arm64/images/)
+
+Raspberry Pi OS uses the standard Debian repositories,
+so it is fully compatible with Debian's arm64 repository.
+You can simply follow the [steps for Debian](#debian) to install buildah.
+
+
+### [RHEL8 Beta](https://www.redhat.com/en/blog/powering-its-future-while-preserving-present-introducing-red-hat-enterprise-linux-8-beta?intcmp=701f2000001Cz6OAAS)
+
+```bash
+sudo yum module enable -y container-tools:1.0
+sudo yum module install -y buildah
+```
+
+### [Ubuntu](https://www.ubuntu.com)
+
+The buildah package is available in the official repositories for Ubuntu 20.10
+and newer.
+
+```bash
+# Ubuntu 20.10 and newer
+sudo apt-get -y update
+sudo apt-get -y install buildah
+```
+
+If you would prefer newer (though not as well-tested) packages,
+the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah)
+provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS).
+The packages in the Kubic project repos are updated more frequently than the ones in Ubuntu's official repositories, due to how Debian/Ubuntu works.
+Check out the Kubic project page for a list of supported Ubuntu version and architecture combinations.
+The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian).
+
+CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
+OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts.
+
+
+```bash
+. /etc/os-release
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${ID^}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
+wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${ID^}_${VERSION_ID}/Release.key -O Release.key
+sudo apt-key add - < Release.key
+sudo apt-get update -qq
+sudo apt-get -qq -y install buildah
+```
+
+# Building from scratch
+
+## System Requirements
+
+### Kernel Version Requirements
+To run Buildah on Red Hat Enterprise Linux or CentOS, version 7.4 or higher is required.
+On other Linux distributions Buildah requires a kernel version that supports the OverlayFS and/or fuse-overlayfs filesystem -- you'll need to consult your distribution's documentation to determine a minimum version number. + +### runc Requirement + +Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build` +encounters a `RUN` instruction, so you'll also need to build and install a compatible version of +[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases. If Buildah is installed +via a package manager such as yum, dnf or apt-get, runc will be installed as part of that process. + +### CNI Requirement + +When Buildah uses `runc` to run commands, it defaults to running those commands +in the host's network namespace. If the command is being run in a separate +user namespace, though, for example when ID mapping is used, then the command +will also be run in a separate network namespace. + +A newly-created network namespace starts with no network interfaces, so +commands which are run in that namespace are effectively disconnected from the +network unless additional setup is done. Buildah relies on the CNI +[library](https://github.com/containernetworking/cni) and +[plugins](https://github.com/containernetworking/plugins) to set up interfaces +and routing for network namespaces. + +If Buildah is installed via a package manager such as yum, dnf or apt-get, a +package containing CNI plugins may be available (in Fedora, the package is +named `containernetworking-cni`). If not, they will need to be installed, +for example using: +``` + git clone https://github.com/containernetworking/plugins + ( cd ./plugins; ./build_linux.sh ) + sudo mkdir -p /opt/cni/bin + sudo install -v ./plugins/bin/* /opt/cni/bin +``` + +The CNI library needs to be configured so that it will know which plugins to +call to set up namespaces. Usually, this configuration takes the form of one +or more configuration files in the `/etc/cni/net.d` directory. A set of example +configuration files is included in the +[`docs/cni-examples`](https://github.com/containers/buildah/tree/main/docs/cni-examples) +directory of this source tree. + +## Package Installation + +Buildah is available on several software repositories and can be installed via a package manager such +as yum, dnf or apt-get on a number of Linux distributions. + +## Installation from GitHub + +Prior to installing Buildah, install the following packages on your Linux distro: +* make +* golang (Requires version 1.13 or higher.) +* bats +* btrfs-progs-devel +* bzip2 +* device-mapper-devel +* git +* go-md2man +* gpgme-devel +* glib2-devel +* libassuan-devel +* libseccomp-devel +* runc (Requires version 1.0 RC4 or higher.) 
+* containers-common
+
+### Fedora
+
+In Fedora, you can use this command:
+
+```
+ dnf -y install \
+    make \
+    golang \
+    bats \
+    btrfs-progs-devel \
+    device-mapper-devel \
+    glib2-devel \
+    gpgme-devel \
+    libassuan-devel \
+    libseccomp-devel \
+    git \
+    bzip2 \
+    go-md2man \
+    runc \
+    containers-common
+```
+
+Then, to install Buildah on Fedora, follow the steps in this example:
+
+```
+  mkdir ~/buildah
+  cd ~/buildah
+  export GOPATH=`pwd`
+  git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
+  cd ./src/github.com/containers/buildah
+  make
+  sudo make install
+  buildah --help
+```
+
+### RHEL, CentOS
+
+In RHEL and CentOS 7, ensure that you are subscribed to the `rhel-7-server-rpms`,
+`rhel-7-server-extras-rpms`, `rhel-7-server-optional-rpms` and `EPEL` repositories, then
+run this command:
+
+```
+ yum -y install \
+    make \
+    golang \
+    bats \
+    btrfs-progs-devel \
+    device-mapper-devel \
+    glib2-devel \
+    gpgme-devel \
+    libassuan-devel \
+    libseccomp-devel \
+    git \
+    bzip2 \
+    go-md2man \
+    runc \
+    skopeo-containers
+```
+
+The build steps for Buildah on RHEL or CentOS are the same as for Fedora, above.
+
+*NOTE:* Running Buildah as a non-root user is not supported on RHEL or CentOS
+version 7.*, because those systems do not ship newuidmap or newgidmap. It is
+possible to pull the shadow-utils source RPM from Fedora 29 and build and
+install from that in order to run Buildah as non-root on these systems.
+
+### openSUSE
+
+On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
+
+```
+ zypper in make \
+    git \
+    golang \
+    runc \
+    bzip2 \
+    libgpgme-devel \
+    libseccomp-devel \
+    device-mapper-devel \
+    libbtrfs-devel \
+    go-md2man
+```
+
+The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above.
+
+
+### Ubuntu
+
+In Ubuntu zesty and xenial, you can use these commands:
+
+```
+  sudo apt-get -y install software-properties-common
+  sudo add-apt-repository -y ppa:alexlarsson/flatpak
+  sudo add-apt-repository -y ppa:gophers/archive
+  sudo apt-add-repository -y ppa:projectatomic/ppa
+  sudo apt-get -y -qq update
+  sudo apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
+  sudo apt-get -y install golang-1.13
+```
+Then, to install Buildah on Ubuntu, follow the steps in this example:
+
+```
+  mkdir ~/buildah
+  cd ~/buildah
+  export GOPATH=`pwd`
+  git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
+  cd ./src/github.com/containers/buildah
+  PATH=/usr/lib/go-1.13/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
+  sudo make install install.runc
+  buildah --help
+```
+
+### Debian
+
+To install the required dependencies, you can use these commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
+
+```
+gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
+gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D | sudo tee -a /usr/share/keyrings/projectatomic-ppa.gpg >/dev/null
+echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' | sudo tee /etc/apt/sources.list.d/projectatomic-ppa.list
+sudo apt update
+sudo apt -y install -t stretch-backports golang
+sudo apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
+```
+
+The build steps on Debian are otherwise the same as Ubuntu, above.
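+
+Whichever distribution you built on, a quick sanity check of the freshly
+installed binary is to ask it for its version and its view of the storage
+configuration (a minimal smoke test, not part of the official build steps):
+
+```bash
+buildah version
+buildah info
+```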
+
+## Vendoring - Dependency Management
+
+This project uses [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI complains about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.
+
+## Configuration files
+
+The following configuration files are required in order for Buildah to run appropriately. The
+majority of these files are commonly contained in the `containers-common` package.
+
+### [registries.conf](https://github.com/containers/buildah/blob/main/docs/samples/registries.conf)
+
+#### Man Page: [registries.conf.5](https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md)
+
+`/etc/containers/registries.conf`
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+#### Example from the Fedora `containers-common` package
+
+```
+cat /etc/containers/registries.conf
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to TOML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries.search', 'registries.insecure',
+# and 'registries.block'.
+
+[registries.search]
+registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com', 'registry.centos.org']
+
+# If you need to access insecure registries, add the registry's fully-qualified name.
+# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
+[registries.insecure]
+registries = []
+
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#
+# Docker only
+[registries.block]
+registries = []
+```
+
+### [mounts.conf](https://src.fedoraproject.org/rpms/skopeo/blob/main/f/mounts.conf)
+
+`/usr/share/containers/mounts.conf` and optionally `/etc/containers/mounts.conf`
+
+The mounts.conf files specify volume mount files or directories that are automatically mounted inside containers when executing the `buildah run` or `buildah build` commands. Container processes can then use this content. The volume mount content does not get committed to the final image. This file is usually provided by the containers-common package.
+
+Usually these directories are used for passing secrets or credentials required by the package software to access remote package repositories.
+
+For example, given a mounts.conf line "`/usr/share/rhel/secrets:/run/secrets`", the content of the `/usr/share/rhel/secrets` directory is mounted on `/run/secrets` inside the container. This mountpoint allows Red Hat Enterprise Linux subscriptions from the host to be used within the container. It is also possible to omit the destination if it's equal to the source path. For example, specifying `/var/lib/secrets` will mount the directory into the same container destination path `/var/lib/secrets`.
+
+Note that this is not a volume mount. The content of the volumes is copied into container storage, not bind mounted directly from the host.
+
+#### Example from the Fedora `containers-common` package:
+
+```
+cat /usr/share/containers/mounts.conf
+/usr/share/rhel/secrets:/run/secrets
+```
+
+### [seccomp.json](https://src.fedoraproject.org/rpms/skopeo/blob/main/f/seccomp.json)
+
+`/usr/share/containers/seccomp.json`
+
+seccomp.json contains the list of seccomp rules to be allowed inside of
+containers. This file is usually provided by the containers-common package.
+
+The link above points to an example seccomp.json file.
+
+### [policy.json](https://github.com/containers/skopeo/blob/main/default-policy.json)
+
+`/etc/containers/policy.json`
+
+#### Man Page: [policy.json.5](https://github.com/containers/image/blob/main/docs/policy.json.md)
+
+
+#### Example from the Fedora `containers-common` package:
+
+```
+cat /etc/containers/policy.json
+{
+    "default": [
+        {
+            "type": "insecureAcceptAnything"
+        }
+    ],
+    "transports":
+        {
+            "docker-daemon":
+                {
+                    "": [{"type":"insecureAcceptAnything"}]
+                }
+        }
+}
+```
+
+## Debug with Delve and the like
+
+To make a debug build without optimizations, use `DEBUG=1`:
+```
+make all DEBUG=1
+```
+
+## Vendoring
+
+Buildah uses Go Modules for vendoring purposes. If you need to update or add a vendored package into Buildah, please follow this procedure:
+ * Enter your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set as noted above.
+ * `export GO111MODULE=on`
+ * `go get` the needed version:
+   * Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13`
+   * Assuming that you want to 'bump' the `github.com/containers/storage` package to a particular commit, use this command: `go get github.com/containers/storage@e307568568533c4afccdf7b56df7b4493e4e9a7b`
+ * `make vendor-in-container`
+ * `make`
+ * `make install`
+ * Then stage any updated or added files with `git add`, run `git commit`, and create a PR.
+
+### Vendor from your own fork
+
+If you wish to vendor in your personal fork to try changes out (assuming containers/storage in the below example):
+
+ * `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH`
+ * `make vendor-in-container`
+
+To revert:
+ * `go mod edit -dropreplace github.com/containers/storage`
+ * `make vendor-in-container`
+
+To speed up fetching dependencies, you can use a [Go Module Proxy](https://proxy.golang.org) by setting `GOPROXY=https://proxy.golang.org`.
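+
+For instance, a vendoring run with the proxy enabled could look like this (the
+`containers/storage` version shown only reuses the illustration from the
+vendoring steps above):
+
+```bash
+export GOPROXY=https://proxy.golang.org
+go get github.com/containers/storage@v1.12.13
+make vendor-in-container
+```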
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go new file mode 100644 index 00000000000..8085cd09789 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/parse/parse.go @@ -0,0 +1,408 @@ +package parse + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/buildah/internal" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +const ( + // TypeBind is the type for mounting host dir + TypeBind = "bind" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // TypeCache is the type for mounting a common persistent cache from host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. + // Lifecycle of following directory will be inherited from how host machine treats temporary directory + BuildahCacheDir = "buildah-cache" +) + +var ( + errBadMntOption = errors.New("invalid mount option") + errBadOptionArg = errors.New("must provide an argument for option") + errBadVolDest = errors.New("must set volume destination") + errBadVolSrc = errors.New("must set volume source") +) + +// GetBindMount parses a single bind mount entry from the --mount flag. +// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. +// Caller is expected to perform unmount of any mounted images +func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) { + newMount := specs.Mount{ + Type: TypeBind, + } + + mountReadability := false + setDest := false + bindNonRecursive := false + fromImage := "" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "bind-nonrecursive": + newMount.Options = append(newMount.Options, "bind") + bindNonRecursive = true + case "ro", "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?) 
+ newMount.Options = append(newMount.Options, kv[0]) + mountReadability = true + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + mountReadability = true + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + mountReadability = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + case "from": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + fromImage = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, kv[1]) + case "src", "source": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Source = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, "", err + } + newMount.Destination = kv[1] + setDest = true + case "consistency": + // Option for OS X only, has no meaning on other platforms + // and can thus be safely ignored. + // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts + default: + return newMount, "", errors.Wrapf(errBadMntOption, kv[0]) + } + } + + // default mount readability is always readonly + if !mountReadability { + newMount.Options = append(newMount.Options, "ro") + } + + // Following variable ensures that we return imagename only if we did additional mount + isImageMounted := false + if fromImage != "" { + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromImage]; ok { + mountPoint = val.MountPoint + } + } + // if mountPoint of image was not found in additionalMap + // or additionalMap was nil, try mounting image + if mountPoint == "" { + image, err := internalUtil.LookupImage(ctx, store, fromImage) + if err != nil { + return newMount, "", err + } + + mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) + if err != nil { + return newMount, "", err + } + isImageMounted = true + } + contextDir = mountPoint + } + + // buildkit parity: default bind option must be `rbind` + // unless specified + if !bindNonRecursive { + newMount.Options = append(newMount.Options, "rbind") + } + + if !setDest { + return newMount, fromImage, errBadVolDest + } + + // buildkit parity: support absolute path for sources from current build context + if contextDir != "" { + // path should be /contextDir/specified path + newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // looks like its coming from `build run --mount=type=bind` allow using absolute path + // error out if no source is set + if newMount.Source == "" { + return newMount, "", errBadVolSrc + } + if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { + return newMount, "", err + } + } + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, fromImage, err + } + newMount.Options = opts + + if !isImageMounted { + // we don't want any cleanups if image was not mounted explicitly + // so dont return anything + fromImage = "" + } + + return newMount, fromImage, nil +} + +// GetCacheMount parses a single cache mount entry from the --mount flag. 
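+// The supported options mirror buildkit's cache mounts: unless from= names a
+// build stage (whose mountpoint is then reused as the cache source), the cache
+// is backed by a persistent host directory under getTempDir()/BuildahCacheDir,
+// keyed by id= or, failing that, by the destination path. A hypothetical flag
+// value, for illustration only:
+//
+//	--mount type=cache,id=gomod,target=/go/pkg/mod,uid=1000,mode=0755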
+func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, error) {
+	var err error
+	var mode uint64
+	var (
+		setDest     bool
+		setShared   bool
+		setReadOnly bool
+	)
+	fromStage := ""
+	newMount := specs.Mount{
+		Type: TypeBind,
+	}
+	// if id is set, a new subdirectory with `id` will be created under /host-temp/buildah-cache/id
+	id := ""
+	// buildkit parity: cache directory mode defaults to 0755
+	mode = 0o755
+	// buildkit parity: cache directory defaults to uid 0 if not specified
+	uid := 0
+	// buildkit parity: cache directory defaults to gid 0 if not specified
+	gid := 0
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "nosuid", "nodev", "noexec":
+			// TODO: detect duplication of these options.
+			// (Is this necessary?)
+			newMount.Options = append(newMount.Options, kv[0])
+		case "rw", "readwrite":
+			newMount.Options = append(newMount.Options, "rw")
+		case "readonly", "ro":
+			// Alias for "ro"
+			newMount.Options = append(newMount.Options, "ro")
+			setReadOnly = true
+		case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
+			newMount.Options = append(newMount.Options, kv[0])
+			setShared = true
+		case "bind-propagation":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, kv[1])
+		case "id":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			id = kv[1]
+		case "from":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			fromStage = kv[1]
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+				return newMount, err
+			}
+			newMount.Destination = kv[1]
+			setDest = true
+		case "src", "source":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Source = kv[1]
+		case "mode":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			mode, err = strconv.ParseUint(kv[1], 8, 32)
+			if err != nil {
+				return newMount, errors.Wrapf(err, "Unable to parse cache mode")
+			}
+		case "uid":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			uid, err = strconv.Atoi(kv[1])
+			if err != nil {
+				return newMount, errors.Wrapf(err, "Unable to parse cache uid")
+			}
+		case "gid":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			gid, err = strconv.Atoi(kv[1])
+			if err != nil {
+				return newMount, errors.Wrapf(err, "Unable to parse cache gid")
+			}
+		default:
+			return newMount, errors.Wrapf(errBadMntOption, kv[0])
+		}
+	}
+
+	if !setDest {
+		return newMount, errBadVolDest
+	}
+
+	if fromStage != "" {
+		// do not create a cache on the host;
+		// instead use the read-only mounted stage as the cache
+		mountPoint := ""
+		if additionalMountPoints != nil {
+			if val, ok := additionalMountPoints[fromStage]; ok {
+				if val.IsStage {
+					mountPoint = val.MountPoint
+				}
+			}
+		}
+		// Cache mounts do not support using an image, so if no stage was found,
+		// return an error
+		if mountPoint == "" {
+			return newMount, fmt.Errorf("no stage found with name %s", fromStage)
+		}
+		// path should be /contextDir/specified path
+		newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
+	} else {
+		// we need to create cache on host if no image is being used
+
+		// since the type is cache and the cache can be reused by consecutive builds,
+		// create a common cache directory, which persists on the host within the temp lifecycle;
+		// add a subdirectory if specified
+
+		// cache parent directory
+		cacheParent := filepath.Join(getTempDir(), BuildahCacheDir)
+		// create cache on host if not present
+		err = os.MkdirAll(cacheParent, os.FileMode(0755))
+		if err != nil {
+			return newMount, errors.Wrapf(err, "Unable to create build cache directory")
+		}
+
+		if id != "" {
+			newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
+		} else {
+			newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
+		}
+		idPair := idtools.IDPair{
+			UID: uid,
+			GID: gid,
+		}
+		// buildkit parity: change uid and gid if specified, otherwise keep `0`
+		err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+		if err != nil {
+			return newMount, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
+		}
+	}
+
+	// buildkit parity: default sharing should be shared
+	// unless specified
+	if !setShared {
+		newMount.Options = append(newMount.Options, "shared")
+	}
+
+	// buildkit parity: cache must be writable unless `ro` or `readonly` is configured explicitly
+	if !setReadOnly {
+		newMount.Options = append(newMount.Options, "rw")
+	}
+
+	newMount.Options = append(newMount.Options, "bind")
+
+	opts, err := parse.ValidateVolumeOpts(newMount.Options)
+	if err != nil {
+		return newMount, err
+	}
+	newMount.Options = opts
+
+	return newMount, nil
+}
+
+// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
+func GetTmpfsMount(args []string) (specs.Mount, error) {
+	newMount := specs.Mount{
+		Type:   TypeTmpfs,
+		Source: TypeTmpfs,
+	}
+
+	setDest := false
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "ro", "nosuid", "nodev", "noexec":
+			newMount.Options = append(newMount.Options, kv[0])
+		case "readonly":
+			// Alias for "ro"
+			newMount.Options = append(newMount.Options, "ro")
+		case "tmpcopyup":
+			// the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
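+			// A hypothetical flag value, for illustration only:
+			//	--mount type=tmpfs,target=/run,tmpcopyup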
+			newMount.Options = append(newMount.Options, kv[0])
+		case "tmpfs-mode":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
+		case "tmpfs-size":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
+		case "src", "source":
+			return newMount, errors.Errorf("source is not supported with tmpfs mounts")
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+				return newMount, err
+			}
+			newMount.Destination = kv[1]
+			setDest = true
+		default:
+			return newMount, errors.Wrapf(errBadMntOption, kv[0])
+		}
+	}
+
+	if !setDest {
+		return newMount, errBadVolDest
+	}
+
+	return newMount, nil
+}
+
+/* This is an internal function and could be changed at any time */
+/* for external usage please refer to buildah/pkg/parse.GetTempDir() */
+func getTempDir() string {
+	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+		return tmpdir
+	}
+	return "/var/tmp"
+}
diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go
new file mode 100644
index 00000000000..8ddff99fb75
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/types.go
@@ -0,0 +1,11 @@
+package internal
+
+// Types in internal packages are subject to change between releases; avoid using them outside of buildah.
+
+// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor.
+// StageExecutor has the ability to mount stages/images in the current context and
+// automatically clean them up.
+type StageMountDetails struct {
+	IsStage    bool   // tells whether the mountpoint returned from the stage executor is a stage or an image
+	MountPoint string // mountpoint of stage/image
+}
diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go
new file mode 100644
index 00000000000..cce50816740
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/util/util.go
@@ -0,0 +1,24 @@
+package util
+
+import (
+	"github.com/containers/common/libimage"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+)
+
+// LookupImage returns the *Image corresponding to the given image name or ID
+func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) {
+	systemContext := ctx
+	if systemContext == nil {
+		systemContext = &types.SystemContext{}
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return nil, err
+	}
+	localImage, _, err := runtime.LookupImage(image, nil)
+	if err != nil {
+		return nil, err
+	}
+	return localImage, nil
+}
diff --git a/vendor/github.com/containers/buildah/libdm_tag.sh b/vendor/github.com/containers/buildah/libdm_tag.sh
new file mode 100644
index 00000000000..815b5d914ed
--- /dev/null
+++ b/vendor/github.com/containers/buildah/libdm_tag.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+tmpdir="$PWD/tmp.$RANDOM"
+mkdir -p "$tmpdir"
+trap 'rm -fr "$tmpdir"' EXIT
+${CC:-cc} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS} -o "$tmpdir"/libdm_tag -x c - -ldevmapper > /dev/null 2> /dev/null << EOF
+#include <libdevmapper.h>
+int main() {
+	struct dm_task *task;
+	dm_task_deferred_remove(task);
+	return 0;
+}
+EOF
+if test $?
-ne 0 ; then + echo libdm_no_deferred_remove +fi diff --git a/vendor/github.com/containers/buildah/mount.go b/vendor/github.com/containers/buildah/mount.go new file mode 100644 index 00000000000..8c7a23f8cfd --- /dev/null +++ b/vendor/github.com/containers/buildah/mount.go @@ -0,0 +1,53 @@ +package buildah + +import ( + "github.com/pkg/errors" +) + +// Mount mounts a container's root filesystem in a location which can be +// accessed from the host, and returns the location. +func (b *Builder) Mount(label string) (string, error) { + mountpoint, err := b.store.Mount(b.ContainerID, label) + if err != nil { + return "", errors.Wrapf(err, "error mounting build container %q", b.ContainerID) + } + b.MountPoint = mountpoint + + err = b.Save() + if err != nil { + return "", errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID) + } + return mountpoint, nil +} + +func (b *Builder) setMountPoint(mountPoint string) error { + b.MountPoint = mountPoint + if err := b.Save(); err != nil { + return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID) + } + return nil +} + +// Mounted returns whether the container is mounted or not +func (b *Builder) Mounted() (bool, error) { + mountCnt, err := b.store.Mounted(b.ContainerID) + if err != nil { + return false, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID) + } + mounted := mountCnt > 0 + if mounted && b.MountPoint == "" { + ctr, err := b.store.Container(b.ContainerID) + if err != nil { + return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID) + } + layer, err := b.store.Layer(ctr.LayerID) + if err != nil { + return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID) + } + return mounted, b.setMountPoint(layer.MountPoint) + } + if !mounted && b.MountPoint != "" { + return mounted, b.setMountPoint("") + } + return mounted, nil +} diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go new file mode 100644 index 00000000000..2f67eedb54e --- /dev/null +++ b/vendor/github.com/containers/buildah/new.go @@ -0,0 +1,338 @@ +package buildah + +import ( + "context" + "fmt" + "math/rand" + "strings" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/blobcache" + "github.com/containers/common/libimage" + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/shortnames" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/openshift/imagebuilder" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // BaseImageFakeName is the "name" of a source image which we interpret + // as "no image". + BaseImageFakeName = imagebuilder.NoBaseImageSpecifier +) + +func getImageName(name string, img *storage.Image) string { + imageName := name + if len(img.Names) > 0 { + imageName = img.Names[0] + // When the image used by the container is a tagged image + // the container name might be set to the original image instead of + // the image given in the "from" command line. + // This loop is supposed to fix this. 
+ for _, n := range img.Names { + if strings.Contains(n, name) { + imageName = n + break + } + } + } + return imageName +} + +func imageNamePrefix(imageName string) string { + prefix := imageName + s := strings.Split(prefix, ":") + if len(s) > 0 { + prefix = s[0] + } + s = strings.Split(prefix, "/") + if len(s) > 0 { + prefix = s[len(s)-1] + } + s = strings.Split(prefix, "@") + if len(s) > 0 { + prefix = s[0] + } + return prefix +} + +func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage.IDMappingOptions { + var options storage.IDMappingOptions + if idmapOptions != nil { + options.HostUIDMapping = idmapOptions.HostUIDMapping + options.HostGIDMapping = idmapOptions.HostGIDMapping + uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap) + if len(uidmap) > 0 && len(gidmap) > 0 { + options.UIDMap = uidmap + options.GIDMap = gidmap + } else { + options.HostUIDMapping = true + options.HostGIDMapping = true + } + } + return options +} + +func containerNameExist(name string, containers []storage.Container) bool { + for _, container := range containers { + for _, cname := range container.Names { + if cname == name { + return true + } + } + } + return false +} + +func findUnusedContainer(name string, containers []storage.Container) string { + suffix := 1 + tmpName := name + for containerNameExist(tmpName, containers) { + tmpName = fmt.Sprintf("%s-%d", name, suffix) + suffix++ + } + return tmpName +} + +func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { + var ( + ref types.ImageReference + img *storage.Image + err error + ) + + if options.FromImage == BaseImageFakeName { + options.FromImage = "" + } + + if options.NetworkInterface == nil { + // create the network interface + // Note: It is important to do this before we pull any images/create containers. + // The default backend detection logic needs an empty store to correctly detect + // that we can use netavark, if the store was not empty it will use CNI to not break existing installs. + options.NetworkInterface, err = getNetworkInterface(store, options.CNIConfigDir, options.CNIPluginPath) + if err != nil { + return nil, err + } + } + + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) + + if options.FromImage != "" && options.FromImage != "scratch" { + imageRuntime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) + if err != nil { + return nil, err + } + + pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String()) + if err != nil { + return nil, err + } + + // Note: options.Format does *not* relate to the image we're + // about to pull (see tests/digests.bats). So we're not + // forcing a MIMEType in the pullOptions below. 
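+	// (When a blob directory is configured, it is wired into the pull options
+	// below via a destination lookup-reference function so that pulled blobs
+	// are also cached locally; see the BlobDirectory handling further down.)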
+ pullOptions := libimage.PullOptions{} + pullOptions.RetryDelay = &options.PullRetryDelay + pullOptions.OciDecryptConfig = options.OciDecryptConfig + pullOptions.SignaturePolicyPath = options.SignaturePolicyPath + pullOptions.Writer = options.ReportWriter + + maxRetries := uint(options.MaxPullRetries) + pullOptions.MaxRetries = &maxRetries + + if options.BlobDirectory != "" { + pullOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) + } + + pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions) + if err != nil { + return nil, err + } + if len(pulledImages) > 0 { + img = pulledImages[0].StorageImage() + ref, err = pulledImages[0].StorageReference() + if err != nil { + return nil, err + } + } + } + + imageSpec := options.FromImage + imageID := "" + imageDigest := "" + topLayer := "" + if img != nil { + imageSpec = getImageName(imageNamePrefix(imageSpec), img) + imageID = img.ID + topLayer = img.TopLayer + } + var src types.Image + if ref != nil { + srcSrc, err := ref.NewImageSource(ctx, systemContext) + if err != nil { + return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref)) + } + defer srcSrc.Close() + manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref)) + } + if manifestDigest, err := manifest.Digest(manifestBytes); err == nil { + imageDigest = manifestDigest.String() + } + var instanceDigest *digest.Digest + if manifest.MIMETypeIsMultiImage(manifestType) { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref)) + } + instance, err := list.ChooseInstance(systemContext) + if err != nil { + return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref)) + } + instanceDigest = &instance + } + src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest)) + if err != nil { + return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest) + } + } + + name := "working-container" + if options.ContainerSuffix != "" { + name = options.ContainerSuffix + } + if options.Container != "" { + name = options.Container + } else { + if imageSpec != "" { + name = imageNamePrefix(imageSpec) + "-" + name + } + } + var container *storage.Container + tmpName := name + if options.Container == "" { + containers, err := store.Containers() + if err != nil { + return nil, errors.Wrapf(err, "unable to check for container names") + } + tmpName = findUnusedContainer(tmpName, containers) + } + + conflict := 100 + for { + + var flags map[string]interface{} + // check if we have predefined ProcessLabel and MountLabel + // this could be true if this is another stage in a build + if options.ProcessLabel != "" && options.MountLabel != "" { + flags = map[string]interface{}{ + "ProcessLabel": options.ProcessLabel, + "MountLabel": options.MountLabel, + } + } + coptions := storage.ContainerOptions{ + LabelOpts: options.CommonBuildOpts.LabelOpts, + IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions), + Flags: flags, + Volatile: true, + } + container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions) + if err == nil { + name = tmpName + 
break + } + if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" { + return nil, errors.Wrapf(err, "error creating container") + } + tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict) + conflict = conflict * 10 + } + defer func() { + if err != nil { + if err2 := store.DeleteContainer(container.ID); err2 != nil { + logrus.Errorf("error deleting container %q: %v", container.ID, err2) + } + } + }() + + uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap) + + defaultNamespaceOptions, err := DefaultNamespaceOptions() + if err != nil { + return nil, err + } + + namespaceOptions := defaultNamespaceOptions + namespaceOptions.AddOrReplace(options.NamespaceOptions...) + + // Set the base-image annotations as suggested by the OCI image spec. + imageAnnotations := map[string]string{} + imageAnnotations[v1.AnnotationBaseImageDigest] = imageDigest + if !shortnames.IsShortName(imageSpec) { + // If the base image could be resolved to a fully-qualified + // image name, let's set it. + imageAnnotations[v1.AnnotationBaseImageName] = imageSpec + } + + builder := &Builder{ + store: store, + Type: containerType, + FromImage: imageSpec, + FromImageID: imageID, + FromImageDigest: imageDigest, + Container: name, + ContainerID: container.ID, + ImageAnnotations: imageAnnotations, + ImageCreatedBy: "", + ProcessLabel: container.ProcessLabel(), + MountLabel: container.MountLabel(), + DefaultMountsFilePath: options.DefaultMountsFilePath, + Isolation: options.Isolation, + NamespaceOptions: namespaceOptions, + ConfigureNetwork: options.ConfigureNetwork, + CNIPluginPath: options.CNIPluginPath, + CNIConfigDir: options.CNIConfigDir, + IDMappingOptions: define.IDMappingOptions{ + HostUIDMapping: len(uidmap) == 0, + HostGIDMapping: len(uidmap) == 0, + UIDMap: uidmap, + GIDMap: gidmap, + }, + Capabilities: copyStringSlice(options.Capabilities), + CommonBuildOpts: options.CommonBuildOpts, + TopLayer: topLayer, + Args: options.Args, + Format: options.Format, + TempVolumes: map[string]bool{}, + Devices: options.Devices, + Logger: options.Logger, + NetworkInterface: options.NetworkInterface, + } + + if options.Mount { + _, err = builder.Mount(container.MountLabel()) + if err != nil { + return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID) + } + } + + if err := builder.initConfig(ctx, src, systemContext); err != nil { + return nil, errors.Wrapf(err, "error preparing image configuration") + } + err = builder.Save() + if err != nil { + return nil, errors.Wrapf(err, "error saving builder state for container %q", builder.ContainerID) + } + + return builder, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go new file mode 100644 index 00000000000..8dadec13084 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -0,0 +1,568 @@ +package blobcache + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/containers/buildah/docker" + "github.com/containers/common/libimage" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest 
"github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + _ types.ImageReference = &blobCacheReference{} + _ types.ImageSource = &blobCacheSource{} + _ types.ImageDestination = &blobCacheDestination{} +) + +const ( + compressedNote = ".compressed" + decompressedNote = ".decompressed" +) + +// BlobCache is an object which saves copies of blobs that are written to it while passing them +// through to some real destination, and which can be queried directly in order to read them +// back. +type BlobCache interface { + types.ImageReference + // HasBlob checks if a blob that matches the passed-in digest (and + // size, if not -1), is present in the cache. + HasBlob(types.BlobInfo) (bool, int64, error) + // Directories returns the list of cache directories. + Directory() string + // ClearCache() clears the contents of the cache directories. Note + // that this also clears content which was not placed there by this + // cache implementation. + ClearCache() error +} + +type blobCacheReference struct { + reference types.ImageReference + // WARNING: The contents of this directory may be accessed concurrently, + // both within this process and by multiple different processes + directory string + compress types.LayerCompression +} + +type blobCacheSource struct { + reference *blobCacheReference + source types.ImageSource + sys types.SystemContext + // this mutex synchronizes the counters below + mu sync.Mutex + cacheHits int64 + cacheMisses int64 + cacheErrors int64 +} + +type blobCacheDestination struct { + reference *blobCacheReference + destination types.ImageDestination +} + +func makeFilename(blobSum digest.Digest, isConfig bool) string { + if isConfig { + return blobSum.String() + ".config" + } + return blobSum.String() +} + +// CacheLookupReferenceFunc wraps a BlobCache into a +// libimage.LookupReferenceFunc to allow for using a BlobCache during +// image-copy operations. +func CacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { + // NOTE: this prevents us from moving BlobCache around and generalizes + // the libimage API. + return func(ref types.ImageReference) (types.ImageReference, error) { + ref, err := NewBlobCache(ref, directory, compress) + if err != nil { + return nil, errors.Wrapf(err, "error using blobcache %q", directory) + } + return ref, nil + } +} + +// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are +// written to the destination image created from the resulting reference will also be stored +// as-is to the specified directory or a temporary directory. The cache directory's contents +// can be cleared by calling the returned BlobCache()'s ClearCache() method. +// The compress argument controls whether or not the cache will try to substitute a compressed +// or different version of a blob when preparing the list of layers when reading an image. 
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) { + if directory == "" { + return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) + } + switch compress { + case types.Compress, types.Decompress, types.PreserveOriginal: + // valid value, accept it + default: + return nil, errors.Errorf("unhandled LayerCompression value %v", compress) + } + return &blobCacheReference{ + reference: ref, + directory: directory, + compress: compress, + }, nil +} + +func (r *blobCacheReference) Transport() types.ImageTransport { + return r.reference.Transport() +} + +func (r *blobCacheReference) StringWithinTransport() string { + return r.reference.StringWithinTransport() +} + +func (r *blobCacheReference) DockerReference() reference.Named { + return r.reference.DockerReference() +} + +func (r *blobCacheReference) PolicyConfigurationIdentity() string { + return r.reference.PolicyConfigurationIdentity() +} + +func (r *blobCacheReference) PolicyConfigurationNamespaces() []string { + return r.reference.PolicyConfigurationNamespaces() +} + +func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return r.reference.DeleteImage(ctx, sys) +} + +func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { + if blobinfo.Digest == "" { + return false, -1, nil + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig)) + fileInfo, err := os.Stat(filename) + if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { + return true, fileInfo.Size(), nil + } + if !os.IsNotExist(err) { + return false, -1, errors.Wrap(err, "checking size") + } + } + + return false, -1, nil +} + +func (r *blobCacheReference) Directory() string { + return r.directory +} + +func (r *blobCacheReference) ClearCache() error { + f, err := os.Open(r.directory) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + return errors.Wrapf(err, "error reading directory %q", r.directory) + } + for _, name := range names { + pathname := filepath.Join(r.directory, name) + if err = os.RemoveAll(pathname); err != nil { + return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(r)) + } + } + return nil +} + +func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference)) + } + return image.FromSource(ctx, sys, src) +} + +func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + src, err := r.reference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference)) + } + logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress) + return &blobCacheSource{reference: r, source: src, sys: *sys}, nil +} + +func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + dest, err := r.reference.NewImageDestination(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new 
image destination %q", transports.ImageName(r.reference)) + } + logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory) + return &blobCacheDestination{reference: r, destination: dest}, nil +} + +func (s *blobCacheSource) Reference() types.ImageReference { + return s.reference +} + +func (s *blobCacheSource) Close() error { + logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) + return s.source.Close() +} + +func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) + manifestBytes, err := ioutil.ReadFile(filename) + if err == nil { + s.cacheHits++ + return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil + } + if !os.IsNotExist(err) { + s.cacheErrors++ + return nil, "", errors.Wrap(err, "checking for manifest file") + } + } + s.cacheMisses++ + return s.source.GetManifest(ctx, instanceDigest) +} + +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return s.source.HasThreadSafeGetBlob() +} + +func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + present, size, err := s.reference.HasBlob(blobinfo) + if err != nil { + return nil, -1, err + } + if present { + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + return f, size, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, -1, errors.Wrap(err, "checking for cache") + } + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) + if err != nil { + return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) + } + return rc, size, nil +} + +func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.source.GetSignatures(ctx, instanceDigest) +} + +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) + } + canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) + + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + if infos == nil { + img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) + if err != nil { + return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + infos = img.LayerInfos() + } + + if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { + replacedInfos := make([]types.BlobInfo, 0, len(infos)) + for _, info := range infos { + var replaceDigest []byte + var err error + blobFile := 
filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) + alternate := "" + switch s.reference.compress { + case types.Compress: + alternate = blobFile + compressedNote + replaceDigest, err = ioutil.ReadFile(alternate) + case types.Decompress: + alternate = blobFile + decompressedNote + replaceDigest, err = ioutil.ReadFile(alternate) + } + if err == nil && digest.Digest(replaceDigest).Validate() == nil { + alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) + fileInfo, err := os.Stat(alternate) + if err == nil { + switch info.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: + switch s.reference.compress { + case types.Compress: + info.MediaType = v1.MediaTypeImageLayerGzip + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + info.MediaType = v1.MediaTypeImageLayer + info.CompressionAlgorithm = nil + } + case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType: + switch s.reference.compress { + case types.Compress: + info.MediaType = manifest.DockerV2Schema2LayerMediaType + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + // nope, not going to suggest anything, it's not allowed by the spec + replacedInfos = append(replacedInfos, info) + continue + } + } + logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) + info.CompressionOperation = s.reference.compress + info.Digest = digest.Digest(replaceDigest) + info.Size = fileInfo.Size() + logrus.Debugf("info = %#v", info) + } + } + replacedInfos = append(replacedInfos, info) + } + infos = replacedInfos + } + + return infos, nil +} + +func (d *blobCacheDestination) Reference() types.ImageReference { + return d.reference +} + +func (d *blobCacheDestination) Close() error { + logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) + return d.destination.Close() +} + +func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { + return d.destination.SupportedManifestMIMETypes() +} + +func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { + return d.destination.SupportsSignatures(ctx) +} + +func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { + return d.destination.DesiredLayerCompression() +} + +func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { + return d.destination.AcceptsForeignLayerURLs() +} + +func (d *blobCacheDestination) MustMatchRuntimeOS() bool { + return d.destination.MustMatchRuntimeOS() +} + +func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { + return d.destination.IgnoresEmbeddedDockerReference() +} + +// Decompress and save the contents of the decompressReader stream into the passed-in temporary +// file. If we successfully save all of the data, rename the file to match the digest of the data, +// and make notes about the relationship between the file that holds a copy of the compressed data +// and this new file. +func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { + defer wg.Done() + // Decompress from and digest the reading end of that pipe. 
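+	// archive.DecompressStream returns a nil ReadCloser when it fails, so
+	// the reader must be nil-checked before it is closed further down.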
+ decompressed, err3 := archive.DecompressStream(decompressReader) + digester := digest.Canonical.Digester() + if err3 == nil { + // Read the decompressed data through the filter over the pipe, blocking until the + // writing end is closed. + _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + } else { + // Drain the pipe to keep from stalling the PutBlob() thread. + if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil { + logrus.Debugf("error draining the pipe: %v", err) + } + } + decompressReader.Close() + if decompressed != nil { + decompressed.Close() + } + tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. + decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) + if err3 == nil { + // Rename the temporary file. + if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } else { + *alternateDigest = digester.Digest() + // Note the relationship between the two files. + if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) + } + if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) + } + } + } else { + // Remove the temporary file. 
+ if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } +} + +func (d *blobCacheDestination) HasThreadSafePutBlob() bool { + return d.destination.HasThreadSafePutBlob() +} + +func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + var tempfile *os.File + var err error + var n int + var alternateDigest digest.Digest + var closer io.Closer + wg := new(sync.WaitGroup) + needToWait := false + compression := archive.Uncompressed + if inputInfo.Digest != "" { + filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err == nil { + stream = io.TeeReader(stream, tempfile) + defer func() { + if err == nil { + if err = os.Rename(tempfile.Name(), filename); err != nil { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) + } + } else { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + } + tempfile.Close() + }() + } else { + logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) + } + if !isConfig { + initial := make([]byte, 8) + n, err = stream.Read(initial) + if n > 0 { + // Build a Reader that will still return the bytes that we just + // read, for PutBlob()'s sake. + stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) + if n >= len(initial) { + compression = archive.DetectCompression(initial[:n]) + } + if compression == archive.Gzip { + // The stream is compressed, so create a file which we'll + // use to store a decompressed copy. + decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err2 != nil { + logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) + decompressedTemp.Close() + } else { + // Write a copy of the compressed data to a pipe, + // closing the writing end of the pipe after + // PutBlob() returns. + decompressReader, decompressWriter := io.Pipe() + closer = decompressWriter + stream = io.TeeReader(stream, decompressWriter) + // Let saveStream() close the reading end and handle the temporary file. 
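+					// The TeeReader above forks the compressed stream: PutBlob()
+					// consumes one copy while saveStream() digests and stores a
+					// decompressed copy next to the cached compressed blob.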
+ wg.Add(1) + needToWait = true + go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) + } + } + } + } + newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) + if closer != nil { + closer.Close() + } + if needToWait { + wg.Wait() + } + if err != nil { + return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) + } + if alternateDigest.Validate() == nil { + logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) + } else { + logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) + } + return newBlobInfo, nil +} + +func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) + if err != nil || present { + return present, reusedInfo, err + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + defer f.Close() + uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) + if err != nil { + return false, types.BlobInfo{}, err + } + return true, uploadedInfo, nil + } + } + + return false, types.BlobInfo{}, nil +} + +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) + } else { + filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) + if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { + logrus.Warnf("error saving manifest as %q: %v", filename, err) + } + } + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) +} + +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.destination.PutSignatures(ctx, signatures, instanceDigest) +} + +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go new file mode 100644 index 00000000000..0b5c04398cf --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go @@ -0,0 +1,116 @@ +package chrootuser + +import ( + "os/user" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchUser indicates that the user provided by the caller does not + // exist in /etc/passwd + ErrNoSuchUser = errors.New("user does not exist in /etc/passwd") +) + +// GetUser will return the uid, gid of the user specified in the userspec. +// It will use the /etc/passwd and /etc/group files inside of the rootdir +// to return this information. 
+// userspec format [user | user:group | uid | uid:gid | user:gid | uid:group ] +func GetUser(rootdir, userspec string) (uint32, uint32, string, error) { + var gid64 uint64 + var gerr error = user.UnknownGroupError("error looking up group") + + spec := strings.SplitN(userspec, ":", 2) + userspec = spec[0] + groupspec := "" + + if userspec == "" { + userspec = "0" + } + + if len(spec) > 1 { + groupspec = spec[1] + } + + uid64, uerr := strconv.ParseUint(userspec, 10, 32) + if uerr == nil && groupspec == "" { + // We parsed the user name as a number, and there's no group + // component, so try to look up the primary GID of the user who + // has this UID. + var name string + name, gid64, gerr = lookupGroupForUIDInContainer(rootdir, uid64) + if gerr == nil { + userspec = name + } else { + // Leave userspec alone, but swallow the error and just + // use GID 0. + gid64 = 0 + gerr = nil + } + } + if uerr != nil { + // The user ID couldn't be parsed as a number, so try to look + // up the user's UID and primary GID. + uid64, gid64, uerr = lookupUserInContainer(rootdir, userspec) + gerr = uerr + } + + if groupspec != "" { + // We have a group name or number, so parse it. + gid64, gerr = strconv.ParseUint(groupspec, 10, 32) + if gerr != nil { + // The group couldn't be parsed as a number, so look up + // the group's GID. + gid64, gerr = lookupGroupInContainer(rootdir, groupspec) + } + } + + homedir, err := lookupHomedirInContainer(rootdir, uid64) + if err != nil { + homedir = "/" + } + + if uerr == nil && gerr == nil { + return uint32(uid64), uint32(gid64), homedir, nil + } + + err = errors.Wrapf(uerr, "error determining run uid") + if uerr == nil { + err = errors.Wrapf(gerr, "error determining run gid") + } + + return 0, 0, homedir, err +} + +// GetGroup returns the gid by looking it up in the /etc/group file +// groupspec format [ group | gid ] +func GetGroup(rootdir, groupspec string) (uint32, error) { + gid64, gerr := strconv.ParseUint(groupspec, 10, 32) + if gerr != nil { + // The group couldn't be parsed as a number, so look up + // the group's GID. + gid64, gerr = lookupGroupInContainer(rootdir, groupspec) + } + if gerr != nil { + return 0, errors.Wrapf(gerr, "error looking up group %q", groupspec) + } + return uint32(gid64), nil +} + +// GetAdditionalGroupsForUser returns a list of gids that userid is associated with +func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) { + gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid) + if err != nil { + return nil, errors.Wrapf(err, "error looking up supplemental groups for uid %d", userid) + } + return gids, nil +} + +// LookupUIDInContainer returns the username and gid associated with a UID in a container. +// It will use the /etc/passwd file inside of the rootdir +// to return this information. 
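+//
+// Illustrative use (a sketch; the rootfs path below is a made-up example):
+//
+//	name, gid, err := chrootuser.LookupUIDInContainer("/var/lib/containers/rootfs", 0)
+//	// On a typical image this yields name == "root", gid == 0, err == nil.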
+func LookupUIDInContainer(rootdir string, uid uint64) (user string, gid uint64, err error) { + return lookupUIDInContainer(rootdir, uid) +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go new file mode 100644 index 00000000000..6c997c4c932 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go @@ -0,0 +1,31 @@ +// +build !linux + +package chrootuser + +import ( + "github.com/pkg/errors" +) + +func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) { + return 0, 0, errors.New("user lookup not supported") +} + +func lookupGroupInContainer(rootdir, groupname string) (uint64, error) { + return 0, errors.New("group lookup not supported") +} + +func lookupGroupForUIDInContainer(rootdir string, userid uint64) (string, uint64, error) { + return "", 0, errors.New("primary group lookup by uid not supported") +} + +func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) { + return nil, errors.New("supplemental groups list lookup by uid not supported") +} + +func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) { + return "", 0, errors.New("UID lookup not supported") +} + +func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) { + return "", errors.New("Home directory lookup not supported") +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go new file mode 100644 index 00000000000..89b31782eef --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go @@ -0,0 +1,297 @@ +// +build linux + +package chrootuser + +import ( + "bufio" + "flag" + "fmt" + "io" + "os" + "os/exec" + "os/user" + "strconv" + "strings" + "sync" + + "github.com/containers/storage/pkg/reexec" + "golang.org/x/sys/unix" +) + +const ( + openChrootedCommand = "chrootuser-open" +) + +func init() { + reexec.Register(openChrootedCommand, openChrootedFileMain) +} + +func openChrootedFileMain() { + status := 0 + flag.Parse() + if len(flag.Args()) < 1 { + os.Exit(1) + } + // Our first parameter is the directory to chroot into. + if err := unix.Chdir(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chdir(): %v", err) + os.Exit(1) + } + if err := unix.Chroot(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chroot(): %v", err) + os.Exit(1) + } + // Anything else is a file we want to dump out. + for _, filename := range flag.Args()[1:] { + f, err := os.Open(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "open(%q): %v", filename, err) + status = 1 + continue + } + _, err = io.Copy(os.Stdout, f) + if err != nil { + fmt.Fprintf(os.Stderr, "read(%q): %v", filename, err) + } + f.Close() + } + os.Exit(status) +} + +func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error) { + // The child process expects a chroot and one or more filenames that + // will be consulted relative to the chroot directory and concatenated + // to its stdout. Start it up. + cmd := reexec.Command(openChrootedCommand, rootdir, filename) + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, nil, err + } + err = cmd.Start() + if err != nil { + return nil, nil, err + } + // Hand back the child's stdout for reading, and the child to reap. 
+ return cmd, stdout, nil +} + +var ( + lookupUser, lookupGroup sync.Mutex +) + +type lookupPasswdEntry struct { + name string + uid uint64 + gid uint64 + home string +} +type lookupGroupEntry struct { + name string + gid uint64 + user string +} + +func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry { + if !rc.Scan() { + return nil + } + line := rc.Text() + fields := strings.Split(line, ":") + if len(fields) != 7 { + return nil + } + uid, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil + } + gid, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil + } + return &lookupPasswdEntry{ + name: fields[0], + uid: uid, + gid: gid, + home: fields[5], + } +} + +func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry { + if !rc.Scan() { + return nil + } + line := rc.Text() + fields := strings.Split(line, ":") + if len(fields) != 4 { + return nil + } + gid, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil + } + return &lookupGroupEntry{ + name: fields[0], + gid: gid, + user: fields[3], + } +} + +func lookupUserInContainer(rootdir, username string) (uid uint64, gid uint64, err error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return 0, 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewScanner(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.name != username { + pwd = parseNextPasswd(rc) + continue + } + return pwd.uid, pwd.gid, nil + } + + return 0, 0, user.UnknownUserError(fmt.Sprintf("error looking up user %q", username)) +} + +func lookupGroupForUIDInContainer(rootdir string, userid uint64) (username string, gid uint64, err error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return "", 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewScanner(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.uid != userid { + pwd = parseNextPasswd(rc) + continue + } + return pwd.name, pwd.gid, nil + } + + return "", 0, ErrNoSuchUser +} + +func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) { + // Get the username associated with userid + username, _, err := lookupGroupForUIDInContainer(rootdir, userid) + if err != nil { + return nil, err + } + + cmd, f, err := openChrootedFile(rootdir, "/etc/group") + if err != nil { + return nil, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewScanner(f) + defer f.Close() + + lookupGroup.Lock() + defer lookupGroup.Unlock() + + grp := parseNextGroup(rc) + for grp != nil { + // Compare against each name in the comma-separated member list so + // that e.g. user "bob" does not match a group member named "bobby". + for _, member := range strings.Split(grp.user, ",") { + if member == username { + gid = append(gid, uint32(grp.gid)) + break + } + } + grp = parseNextGroup(rc) + } + return gid, nil +} + +func lookupGroupInContainer(rootdir, groupname string) (gid uint64, err error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/group") + if err != nil { + return 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewScanner(f) + defer f.Close() + + lookupGroup.Lock() + defer lookupGroup.Unlock() + + grp := parseNextGroup(rc) + for grp != nil { + if grp.name != groupname { + grp = parseNextGroup(rc) + continue + } + return grp.gid, nil + } + + return 0, user.UnknownGroupError(fmt.Sprintf("error looking up group %q", groupname)) +} + +func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) { + cmd, f, err := 
openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return "", 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewScanner(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.uid != uid { + pwd = parseNextPasswd(rc) + continue + } + return pwd.name, pwd.gid, nil + } + + return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up uid %q", uid)) +} + +func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return "", err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewScanner(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.uid != uid { + pwd = parseNextPasswd(rc) + continue + } + return pwd.home, nil + } + + return "", user.UnknownUserError(fmt.Sprintf("error looking up uid %q for homedir", uid)) +} diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go new file mode 100644 index 00000000000..d05fbde7c24 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -0,0 +1,438 @@ +package cli + +// the cli package contains urfave/cli related structs that help make up +// the command line for buildah commands. it resides here so other projects +// that vendor in this code can use them too. + +import ( + "fmt" + "os" + "runtime" + "strings" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/completion" + "github.com/containers/buildah/pkg/parse" + commonComp "github.com/containers/common/pkg/completion" + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// LayerResults represents the results of the layer flags +type LayerResults struct { + ForceRm bool + Layers bool +} + +// UserNSResults represents the results for the UserNS flags +type UserNSResults struct { + UserNS string + UserNSUIDMap []string + UserNSGIDMap []string + UserNSUIDMapUser string + UserNSGIDMapGroup string +} + +// NameSpaceResults represents the results for Namespace flags +type NameSpaceResults struct { + Cgroup string + IPC string + Network string + CNIConfigDir string + CNIPlugInPath string + PID string + UTS string +} + +// BudResults represents the results for Build flags +type BudResults struct { + AllPlatforms bool + Annotation []string + Authfile string + BuildArg []string + CacheFrom string + CertDir string + Compress bool + Creds string + DisableCompression bool + DisableContentTrust bool + IgnoreFile string + File []string + Format string + From string + Iidfile string + Label []string + Logfile string + Manifest string + NoCache bool + Timestamp int64 + Pull string + PullAlways bool + PullNever bool + Quiet bool + Rm bool + Runtime string + RuntimeFlags []string + Secrets []string + SSH []string + SignaturePolicy string + SignBy string + Squash bool + Stdin bool + Tag []string + Target string + TLSVerify bool + Jobs int + LogRusage bool + RusageLogFile string + UnsetEnvs []string +} + +// FromAndBudResults represents the results for common flags +// in build and from +type FromAndBudResults struct { + AddHost []string + BlobCache string + CapAdd []string + CapDrop []string + CgroupParent string + CPUPeriod uint64 + 
CPUQuota int64 + CPUSetCPUs string + CPUSetMems string + CPUShares uint64 + DecryptionKeys []string + Devices []string + DNSSearch []string + DNSServers []string + DNSOptions []string + HTTPProxy bool + Isolation string + Memory string + MemorySwap string + SecurityOpt []string + ShmSize string + Ulimit []string + Volumes []string +} + +// GetUserNSFlags returns the common flags for usernamespace +func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet { + usernsFlags := pflag.FlagSet{} + usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'") + usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace") + usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace") + usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping") + usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping") + return usernsFlags +} + +// GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags +func GetUserNSFlagsCompletions() commonComp.FlagCompletions { + flagCompletion := commonComp.FlagCompletions{} + flagCompletion["userns"] = completion.AutocompleteNamespaceFlag + flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone + flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone + flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName + flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName + return flagCompletion +} + +// GetNameSpaceFlags returns the common flags for a namespace menu +func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet { + fs := pflag.FlagSet{} + fs.StringVar(&flags.Cgroup, "cgroupns", "", "'private', or 'host'") + fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'") + fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'") + fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", "", "`directory` of CNI configuration files") + _ = fs.MarkHidden("cni-config-dir") + fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", "", "`path` of CNI network plugins") + _ = fs.MarkHidden("cni-plugin-path") + fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'") + fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'") + return fs +} + +// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags +func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions { + flagCompletion := commonComp.FlagCompletions{} + flagCompletion["cgroupns"] = completion.AutocompleteNamespaceFlag + flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag + flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag + flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag + flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag + return flagCompletion +} + +// GetLayerFlags returns the common flags for layers +func GetLayerFlags(flags *LayerResults) 
pflag.FlagSet { + fs := pflag.FlagSet{} + fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.") + fs.BoolVar(&flags.Layers, "layers", UseLayers(), "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.") + return fs +} + +// Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags + +// GetBudFlags returns common build flags +func GetBudFlags(flags *BudResults) pflag.FlagSet { + fs := pflag.FlagSet{} + fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms") + fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host") + fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])") + fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.") + fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder") + fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.") + fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") + fs.BoolVar(&flags.Compress, "compress", false, "This is a legacy option, which has no effect on the image") + fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry") + fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default") + fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP") + fs.StringVar(&flags.From, "from", "", "image name used to replace the value in the first FROM instruction in the Containerfile") + fs.StringVar(&flags.IgnoreFile, "ignorefile", "", "path to an alternate .dockerignore file") + fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile") + fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.") + fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to") + fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel") + fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])") + fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr") + fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden") + if err := fs.MarkHidden("loglevel"); err != nil { + panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err)) + } + fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step") + if err := fs.MarkHidden("log-rusage"); err != nil { + panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err)) + } + fs.StringVar(&flags.RusageLogFile, "rusage-logfile", "", "destination file to which rusage should be logged instead of stdout (= the default).") + if err := fs.MarkHidden("rusage-logfile"); err != nil { + panic(fmt.Sprintf("error marking the rusage-logfile flag as hidden: %v", err)) + } + fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. 
Creates manifest list if it does not exist") + fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.") + fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host") + fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available") + fs.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected. + fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store") + if err := fs.MarkHidden("pull-always"); err != nil { + panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err)) + } + fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available") + if err := fs.MarkHidden("pull-never"); err != nil { + panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err)) + } + fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress") + fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build") + // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go. + fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime") + fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build") + fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`") + fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)") + if err := fs.MarkHidden("signature-policy"); err != nil { + panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err)) + } + fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer") + fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. 
(format: default|<id>[=<socket>|<key>[,<key>]])") + fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers") + fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image") + fs.StringVar(&flags.Target, "target", "", "set the target build stage to build") + fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time") + fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") + fs.String("variant", "", "override the `variant` of the specified image") + fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "Unset environment variable from final image") + return fs +} + +// GetBudFlagsCompletions returns the FlagCompletions for the common build flags +func GetBudFlagsCompletions() commonComp.FlagCompletions { + flagCompletion := commonComp.FlagCompletions{} + flagCompletion["arch"] = commonComp.AutocompleteNone + flagCompletion["annotation"] = commonComp.AutocompleteNone + flagCompletion["authfile"] = commonComp.AutocompleteDefault + flagCompletion["build-arg"] = commonComp.AutocompleteNone + flagCompletion["cache-from"] = commonComp.AutocompleteNone + flagCompletion["cert-dir"] = commonComp.AutocompleteDefault + flagCompletion["creds"] = commonComp.AutocompleteNone + flagCompletion["file"] = commonComp.AutocompleteDefault + flagCompletion["from"] = commonComp.AutocompleteDefault + flagCompletion["format"] = commonComp.AutocompleteNone + flagCompletion["ignorefile"] = commonComp.AutocompleteDefault + flagCompletion["iidfile"] = commonComp.AutocompleteDefault + flagCompletion["jobs"] = commonComp.AutocompleteNone + flagCompletion["label"] = commonComp.AutocompleteNone + flagCompletion["logfile"] = commonComp.AutocompleteDefault + flagCompletion["manifest"] = commonComp.AutocompleteDefault + flagCompletion["os"] = commonComp.AutocompleteNone + flagCompletion["pull"] = commonComp.AutocompleteDefault + flagCompletion["runtime-flag"] = commonComp.AutocompleteNone + flagCompletion["secret"] = commonComp.AutocompleteNone + flagCompletion["ssh"] = commonComp.AutocompleteNone + flagCompletion["sign-by"] = commonComp.AutocompleteNone + flagCompletion["signature-policy"] = commonComp.AutocompleteNone + flagCompletion["tag"] = commonComp.AutocompleteNone + flagCompletion["target"] = commonComp.AutocompleteNone + flagCompletion["timestamp"] = commonComp.AutocompleteNone + flagCompletion["variant"] = commonComp.AutocompleteNone + flagCompletion["unsetenv"] = commonComp.AutocompleteNone + return flagCompletion +} + +// GetFromAndBudFlags returns from and build flags +func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) { + fs := pflag.FlagSet{} + defaultContainerConfig, err := config.Default() + if err != nil { + return fs, errors.Wrapf(err, "failed to get container config") + } + + fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])") + fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing") + if err := fs.MarkHidden("blob-cache"); err != nil { + panic(fmt.Sprintf("error marking the blob-cache flag as hidden: %v", err)) + } + fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])") + fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the 
specified capability when running (default [])") + fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container") + fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period") + fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota") + fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.") + fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image") + fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "Additional devices to be used within containers (default [])") + fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "Set custom DNS search domains") + fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.") + fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "Set custom DNS options") + fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables") + fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.") + fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)") + fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap") + fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host") + fs.String("os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images") + fs.StringSlice("platform", []string{parse.DefaultPlatform()}, "set the OS/ARCH/VARIANT of the image to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)") + fs.String("variant", "", "override the `variant` of the specified image") + fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])") + fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. 
The format is `<number><unit>`.") + fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits, "ulimit options") + fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Containers.Volumes, "bind mount a volume into the container") + + // Add in the user namespace and namespace flags + usernsFlags := GetUserNSFlags(usernsResults) + namespaceFlags := GetNameSpaceFlags(namespaceResults) + fs.AddFlagSet(&usernsFlags) + fs.AddFlagSet(&namespaceFlags) + + return fs, nil +} + +// GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and build flags +func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions { + flagCompletion := commonComp.FlagCompletions{} + flagCompletion["arch"] = commonComp.AutocompleteNone + flagCompletion["add-host"] = commonComp.AutocompleteNone + flagCompletion["blob-cache"] = commonComp.AutocompleteNone + flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities + flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities + flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?! + flagCompletion["cpu-period"] = commonComp.AutocompleteNone + flagCompletion["cpu-quota"] = commonComp.AutocompleteNone + flagCompletion["cpu-shares"] = commonComp.AutocompleteNone + flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone + flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone + flagCompletion["decryption-key"] = commonComp.AutocompleteNone + flagCompletion["device"] = commonComp.AutocompleteDefault + flagCompletion["dns-search"] = commonComp.AutocompleteNone + flagCompletion["dns"] = commonComp.AutocompleteNone + flagCompletion["dns-option"] = commonComp.AutocompleteNone + flagCompletion["isolation"] = commonComp.AutocompleteNone + flagCompletion["memory"] = commonComp.AutocompleteNone + flagCompletion["memory-swap"] = commonComp.AutocompleteNone + flagCompletion["os"] = commonComp.AutocompleteNone + flagCompletion["platform"] = commonComp.AutocompleteNone + flagCompletion["security-opt"] = commonComp.AutocompleteNone + flagCompletion["shm-size"] = commonComp.AutocompleteNone + flagCompletion["ulimit"] = commonComp.AutocompleteNone + flagCompletion["volume"] = commonComp.AutocompleteDefault + flagCompletion["variant"] = commonComp.AutocompleteNone + + // Add in the user namespace and namespace flag completions + userNsComp := GetUserNSFlagsCompletions() + for name, comp := range userNsComp { + flagCompletion[name] = comp + } + namespaceComp := GetNameSpaceFlagsCompletions() + for name, comp := range namespaceComp { + flagCompletion[name] = comp + } + + return flagCompletion +} + +// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"; +// otherwise it returns false +func UseLayers() bool { + layers := os.Getenv("BUILDAH_LAYERS") + if strings.ToLower(layers) == "true" || layers == "1" { + return true + } + return false +} + +// DefaultFormat returns the default image format +func DefaultFormat() string { + format := os.Getenv("BUILDAH_FORMAT") + if format != "" { + return format + } + return define.OCI +} + +// DefaultIsolation returns the default process isolation type +func DefaultIsolation() string { + isolation := os.Getenv("BUILDAH_ISOLATION") + if isolation != "" { + return isolation + } + if unshare.IsRootless() { + return "rootless" + } + return define.OCI +} + +// DefaultHistory returns the default add-history setting +func DefaultHistory() bool { + history := os.Getenv("BUILDAH_HISTORY") + if strings.ToLower(history) == "true" || history == 
"1" { + return true + } + return false +} + +func VerifyFlagsArgsOrder(args []string) error { + for _, arg := range args { + if strings.HasPrefix(arg, "-") { + return errors.Errorf("No options (%s) can be specified after the image or container name", arg) + } + } + return nil +} + +// aliasFlags is a function to handle backwards compatibility with old flags +func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName { + switch name { + case "net": + name = "network" + case "override-arch": + name = "arch" + case "override-os": + name = "os" + case "purge": + name = "rm" + case "tty": + name = "terminal" + } + return pflag.NormalizedName(name) +} diff --git a/vendor/github.com/containers/buildah/pkg/cli/exec_codes.go b/vendor/github.com/containers/buildah/pkg/cli/exec_codes.go new file mode 100644 index 00000000000..7ba42e910d8 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/cli/exec_codes.go @@ -0,0 +1,13 @@ +package cli + +const ( + // ExecErrorCodeGeneric is the default error code to return from an exec session if libpod failed + // prior to calling the runtime + ExecErrorCodeGeneric = 125 + // ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command + // an example of this can be found by trying to execute a directory: + // `podman exec -l /etc` + ExecErrorCodeCannotInvoke = 126 + // ExecErrorCodeNotFound is the error code to return when a command cannot be found + ExecErrorCodeNotFound = 127 +) diff --git a/vendor/github.com/containers/buildah/pkg/completion/completion.go b/vendor/github.com/containers/buildah/pkg/completion/completion.go new file mode 100644 index 00000000000..a7812d296f5 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/completion/completion.go @@ -0,0 +1,23 @@ +package completion + +import ( + "strings" + + "github.com/spf13/cobra" +) + +/* Autocomplete Functions for cobra ValidArgsFunction */ + +// AutocompleteNamespaceFlag - Autocomplete the userns flag. 
+// -> host, private, container, ns:[path], [path] +func AutocompleteNamespaceFlag(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var completions []string + // If we don't filter on "toComplete", zsh and fish will not do file completion + // even if the prefix typed by the user does not match the returned completions + for _, comp := range []string{"host", "private", "container", "ns:"} { + if strings.HasPrefix(comp, toComplete) { + completions = append(completions, comp) + } + } + return completions, cobra.ShellCompDirectiveDefault +} diff --git a/vendor/github.com/containers/buildah/pkg/formats/formats.go b/vendor/github.com/containers/buildah/pkg/formats/formats.go new file mode 100644 index 00000000000..2753601cfb2 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/formats/formats.go @@ -0,0 +1,167 @@ +package formats + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + "golang.org/x/term" +) + +const ( + // JSONString const to save on duplicate variable names + JSONString = "json" + // IDString const to save on duplicates for Go templates + IDString = "{{.ID}}" + + parsingErrorStr = "Template parsing error" +) + +// Writer interface for outputs +type Writer interface { + Out() error +} + +// JSONStructArray for JSON output +type JSONStructArray struct { + Output []interface{} +} + +// StdoutTemplateArray for Go template output +type StdoutTemplateArray struct { + Output []interface{} + Template string + Fields map[string]string +} + +// JSONStruct for JSON output +type JSONStruct struct { + Output interface{} +} + +// StdoutTemplate for Go template output +type StdoutTemplate struct { + Output interface{} + Template string + Fields map[string]string +} + +// YAMLStruct for YAML output +type YAMLStruct struct { + Output interface{} +} + +func setJSONFormatEncoder(isTerminal bool, w io.Writer) *json.Encoder { + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + if isTerminal { + enc.SetEscapeHTML(false) + } + return enc +} + +// Out method for JSON Arrays +func (j JSONStructArray) Out() error { + buf := bytes.NewBuffer(nil) + enc := setJSONFormatEncoder(term.IsTerminal(int(os.Stdout.Fd())), buf) + if err := enc.Encode(j.Output); err != nil { + return err + } + data := buf.Bytes() + + // JSON returns a byte array with a literal null [110 117 108 108] in it + // if it is passed empty data. We use bytes.Compare to see if that is + // the case. + if diff := bytes.Compare(data, []byte("null")); diff == 0 { + data = []byte("[]") + } + + // If we did get null back, we spit out [] instead, which is + // at least valid JSON for the consumer. 
+ fmt.Printf("%s", data) + humanNewLine() + return nil +} + +// Out method for Go templates +func (t StdoutTemplateArray) Out() error { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) + if strings.HasPrefix(t.Template, "table") { + // replace any spaces with tabs in template so that tabwriter can align it + t.Template = strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1) + headerTmpl, err := template.New("header").Funcs(headerFunctions).Parse(t.Template) + if err != nil { + return errors.Wrapf(err, parsingErrorStr) + } + err = headerTmpl.Execute(w, t.Fields) + if err != nil { + return err + } + fmt.Fprintln(w, "") + } + t.Template = strings.Replace(t.Template, " ", "\t", -1) + tmpl, err := template.New("image").Funcs(basicFunctions).Parse(t.Template) + if err != nil { + return errors.Wrapf(err, parsingErrorStr) + } + for _, raw := range t.Output { + basicTmpl := tmpl.Funcs(basicFunctions) + if err := basicTmpl.Execute(w, raw); err != nil { + return errors.Wrapf(err, parsingErrorStr) + } + fmt.Fprintln(w, "") + } + return w.Flush() +} + +// Out method for JSON struct +func (j JSONStruct) Out() error { + data, err := json.MarshalIndent(j.Output, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", data) + humanNewLine() + return nil +} + +// Out method for Go templates +func (t StdoutTemplate) Out() error { + tmpl, err := template.New("image").Parse(t.Template) + if err != nil { + return errors.Wrapf(err, "template parsing error") + } + err = tmpl.Execute(os.Stdout, t.Output) + if err != nil { + return err + } + humanNewLine() + return nil +} + +// Out method for YAML +func (y YAMLStruct) Out() error { + var buf []byte + var err error + buf, err = yaml.Marshal(y.Output) + if err != nil { + return err + } + fmt.Printf("%s", string(buf)) + humanNewLine() + return nil +} + +// humanNewLine prints a new line at the end of the output only if stdout is a terminal +func humanNewLine() { + if term.IsTerminal(int(os.Stdout.Fd())) { + fmt.Println() + } +} diff --git a/vendor/github.com/containers/buildah/pkg/formats/templates.go b/vendor/github.com/containers/buildah/pkg/formats/templates.go new file mode 100644 index 00000000000..c2582552a3f --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/formats/templates.go @@ -0,0 +1,78 @@ +package formats + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + _ = enc.Encode(v) + // Remove the trailing new line added by the encoder + return strings.TrimSpace(buf.String()) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, + "truncate": truncateWithLength, +} + +// headerFunctions are used to create the headers of a table. +// This is a replacement for basicFunctions during header generation +// because we want the header to remain intact. +// Some functions like `split` are irrelevant so not added. 
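+//
+// Illustrative use (an editor's sketch; the Output value and field names are made up):
+//
+//	t := StdoutTemplateArray{
+//		Output:   []interface{}{struct{ ID, Name string }{"42ab", "example"}},
+//		Template: "table {{.ID}} {{.Name}}",
+//		Fields:   map[string]string{"ID": "ID", "Name": "NAME"},
+//	}
+//	_ = t.Out() // header row rendered with headerFunctions, data rows with basicFunctions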
+var headerFunctions = template.FuncMap{ + "json": func(v string) string { + return v + }, + "title": func(v string) string { + return v + }, + "lower": func(v string) string { + return v + }, + "upper": func(v string) string { + return v + }, + "truncate": func(v string, l int) string { + return v + }, +} + +// Parse creates a new anonymous template with the basic functions +// and parses the given format. +func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} + +// truncateWithLength truncates the source string up to the length provided by the input +func truncateWithLength(source string, length int) string { + if len(source) < length { + return source + } + return source[:length] +} diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go new file mode 100644 index 00000000000..c325bc5cfb3 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -0,0 +1,311 @@ +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Options type holds various configuration options for overlay. +// MountWithOptions accepts the following type so it is easier to specify +// more verbose configuration for an overlay mount. +type Options struct { + // The Upper directory is normally the writable layer in an overlay mount. + // Note!! : The following API does not handle escaping or validate correctness of the values + // passed to UpperDirOptionFragment; instead the API will try to pass values as-is + // to the `mount` command. It is the user's responsibility to pre-validate + // these values. Invalid inputs may lead to undefined behaviour. + // This is provided as-is, use it if it works for you, we can/will change/break that in the future. + // See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 + // TODO: Should we address above comment and handle escaping of metacharacters like + // `comma`, `backslash`, `colon` and any other special characters + UpperDirOptionFragment string + // The Workdir is used to prepare files as they are switched between the layers. + // Note!! : The following API does not handle escaping or validate correctness of the values + // passed to WorkDirOptionFragment; instead the API will try to pass values as-is + // to the `mount` command. It is the user's responsibility to pre-validate + // these values. Invalid inputs may lead to undefined behaviour. + // This is provided as-is, use it if it works for you, we can/will change/break that in the future. 
+ // See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 + // TODO: Should we address above comment and handle escaping of metacharacters like + // `comma`, `backslash` ,`colon` and any other special characters + WorkDirOptionFragment string + // Graph options relayed from podman, will be responsible for choosing mount program + GraphOpts []string + // Mark if following overlay is read only + ReadOnly bool + // RootUID is not used yet but keeping it here for legacy reasons. + RootUID int + // RootGID is not used yet but keeping it here for legacy reasons. + RootGID int +} + +// TempDir generates an overlay Temp directory in the container content +func TempDir(containerDir string, rootUID, rootGID int) (string, error) { + contentDir := filepath.Join(containerDir, "overlay") + if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { + return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir) + } + + contentDir, err := ioutil.TempDir(contentDir, "") + if err != nil { + return "", errors.Wrapf(err, "failed to create the overlay tmpdir in %s directory", contentDir) + } + + return generateOverlayStructure(contentDir, rootUID, rootGID) +} + +// GenerateStructure generates an overlay directory structure for container content +func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) { + contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name) + if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { + return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir) + } + + return generateOverlayStructure(contentDir, rootUID, rootGID) +} + +// generateOverlayStructure generates upper, work and merge directory structure for overlay directory +func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string, error) { + upperDir := filepath.Join(containerDir, "upper") + workDir := filepath.Join(containerDir, "work") + if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil { + return "", errors.Wrapf(err, "failed to create the overlay %s directory", upperDir) + } + if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil { + return "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir) + } + mergeDir := filepath.Join(containerDir, "merge") + if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil { + return "", errors.Wrapf(err, "failed to create the overlay %s directory", mergeDir) + } + + return containerDir, nil +} + +// Mount creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) { + overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID} + return MountWithOptions(contentDir, source, dest, &overlayOpts) +} + +// MountReadOnly creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. 
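+// (Editor's sketch, not upstream code: a typical call sequence, with all
+// paths hypothetical,
+//
+//	contentDir, err := overlay.TempDir("/var/lib/containers/c1", 0, 0)
+//	// contentDir now contains upper/, work/ and merge/ subdirectories
+//	mnt, err := overlay.MountReadOnly(contentDir, "/usr", "/usr", 0, 0, nil)
+//	// mnt is a specs.Mount that can be appended to an OCI runtime spec
+//
+// using the layout created by generateOverlayStructure above.)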
Note that no +// upper layer will be created rendering it a read-only mount +func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) { + overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID} + return MountWithOptions(contentDir, source, dest, &overlayOpts) +} + +// findMountProgram finds if any mount program is specified in the graph options. +func findMountProgram(graphOptions []string) string { + mountMap := map[string]bool{ + ".mount_program": true, + "overlay.mount_program": true, + "overlay2.mount_program": true, + } + + for _, i := range graphOptions { + s := strings.SplitN(i, "=", 2) + if len(s) != 2 { + continue + } + key := s[0] + val := s[1] + if mountMap[key] { + return val + } + } + + return "" +} + +// mountWithMountProgram mount an overlay at mergeDir using the specified mount program +// and overlay options. +func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error { + cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir) + + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "exec %s", mountProgram) + } + return nil +} + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + mergeDir := filepath.Join(contentDir, "merge") + + // Create overlay mount options for rw/ro. + var overlayOptions string + if opts.ReadOnly { + // Read-only overlay mounts require two lower layer. + lowerTwo := filepath.Join(contentDir, "lower") + if err := os.Mkdir(lowerTwo, 0755); err != nil { + return mount, err + } + overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) + } else { + // Read-write overlay mounts want a lower, upper and a work layer. + workDir := filepath.Join(contentDir, "work") + upperDir := filepath.Join(contentDir, "upper") + + if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { + workDir = opts.WorkDirOptionFragment + upperDir = opts.UpperDirOptionFragment + } + + st, err := os.Stat(source) + if err != nil { + return mount, err + } + if err := os.Chmod(upperDir, st.Mode()); err != nil { + return mount, err + } + if stat, ok := st.Sys().(*syscall.Stat_t); ok { + if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { + return mount, err + } + } + overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) + } + + mountProgram := findMountProgram(opts.GraphOpts) + if mountProgram != "" { + if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { + return mount, err + } + + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "bind" + mount.Options = []string{"bind", "slave"} + return mount, nil + } + + if unshare.IsRootless() { + /* If a mount_program is not specified, fallback to try mounting native overlay. 
*/ + overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) + } + + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "overlay" + mount.Options = strings.Split(overlayOptions, ",") + + return mount, nil +} + +// Convert ":" to "\:", the path which will be overlay mounted need to be escaped +func escapeColon(source string) string { + return strings.ReplaceAll(source, ":", "\\:") +} + +// RemoveTemp removes temporary mountpoint and all content from its parent +// directory +func RemoveTemp(contentDir string) error { + if err := Unmount(contentDir); err != nil { + return err + } + + return os.RemoveAll(contentDir) +} + +// Unmount the overlay mountpoint +func Unmount(contentDir string) error { + mergeDir := filepath.Join(contentDir, "merge") + + if unshare.IsRootless() { + // Attempt to unmount the FUSE mount using either fusermount or fusermount3. + // If they fail, fallback to unix.Unmount + for _, v := range []string{"fusermount3", "fusermount"} { + err := exec.Command(v, "-u", mergeDir).Run() + if err != nil && errors.Cause(err) != exec.ErrNotFound { + logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err) + } + if err == nil { + return nil + } + } + // If fusermount|fusermount3 failed to unmount the FUSE file system, attempt unmount + } + + // Ignore EINVAL as the specified merge dir is not a mount point + if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL { + return errors.Wrapf(err, "unmount overlay %s", mergeDir) + } + return nil +} + +func recreate(contentDir string) error { + st, err := system.Stat(contentDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "failed to stat overlay upper directory") + } + + if err := os.RemoveAll(contentDir); err != nil { + return errors.WithStack(err) + } + + if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil { + return errors.Wrap(err, "failed to create overlay directory") + } + return nil +} + +// CleanupMount removes all temporary mountpoint content +func CleanupMount(contentDir string) (Err error) { + if err := recreate(filepath.Join(contentDir, "upper")); err != nil { + return err + } + if err := recreate(filepath.Join(contentDir, "work")); err != nil { + return err + } + return nil +} + +// CleanupContent removes all temporary mountpoint and all content from +// directory +func CleanupContent(containerDir string) (Err error) { + contentDir := filepath.Join(containerDir, "overlay") + + files, err := ioutil.ReadDir(contentDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "read directory") + } + for _, f := range files { + dir := filepath.Join(contentDir, f.Name()) + if err := Unmount(dir); err != nil { + return err + } + } + + if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "failed to cleanup overlay directory") + } + return nil +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go new file mode 100644 index 00000000000..b57b36a6256 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -0,0 +1,1076 @@ +package parse + +// this package should contain functions that parse and validate +// user input and is shared either amongst buildah subcommands or +// would be useful to projects vendoring buildah + +import ( + "fmt" + "net" + "os" + "path/filepath" + "strconv" + "strings" + 
"unicode" + + "github.com/containerd/containerd/platforms" + "github.com/containers/buildah/define" + internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/unshare" + units "github.com/docker/go-units" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/openshift/imagebuilder" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/term" +) + +const ( + // SeccompDefaultPath defines the default seccomp path + SeccompDefaultPath = "/usr/share/containers/seccomp.json" + // SeccompOverridePath if this exists it overrides the default seccomp path + SeccompOverridePath = "/etc/crio/seccomp.json" + // TypeBind is the type for mounting host dir + TypeBind = "bind" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // TypeCache is the type for mounting a common persistent cache from host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. + // Lifecycle of following directory will be inherited from how host machine treats temporary directory + BuildahCacheDir = "buildah-cache" +) + +var ( + errDuplicateDest = errors.Errorf("duplicate mount destination") +) + +// CommonBuildOptions parses the build options from the bud cli +func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { + return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// CommonBuildOptionsFromFlagSet parses the build options from the bud cli +func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) { + var ( + memoryLimit int64 + memorySwap int64 + noDNS bool + err error + ) + + memVal, _ := flags.GetString("memory") + if memVal != "" { + memoryLimit, err = units.RAMInBytes(memVal) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory") + } + } + + memSwapValue, _ := flags.GetString("memory-swap") + if memSwapValue != "" { + if memSwapValue == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(memSwapValue) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory-swap") + } + } + } + + addHost, _ := flags.GetStringSlice("add-host") + if len(addHost) > 0 { + for _, host := range addHost { + if err := validateExtraHost(host); err != nil { + return nil, errors.Wrapf(err, "invalid value for add-host") + } + } + } + + noDNS = false + dnsServers := []string{} + if flags.Changed("dns") { + dnsServers, _ = flags.GetStringSlice("dns") + for _, server := range dnsServers { + if strings.ToLower(server) == "none" { + noDNS = true + } + } + if noDNS && len(dnsServers) > 1 { + return nil, errors.Errorf("invalid --dns, --dns=none may not be used with any other --dns options") + } + } + + dnsSearch := []string{} + if flags.Changed("dns-search") { + dnsSearch, _ = flags.GetStringSlice("dns-search") + if noDNS && len(dnsSearch) > 0 { + return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none") + } + } + + dnsOptions := []string{} + if flags.Changed("dns-option") { + dnsOptions, _ = flags.GetStringSlice("dns-option") + if noDNS && len(dnsOptions) > 0 { + 
return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none") + } + } + + if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil { + return nil, errors.Wrapf(err, "invalid --shm-size") + } + volumes, _ := flags.GetStringArray("volume") + if err := Volumes(volumes); err != nil { + return nil, err + } + cpuPeriod, _ := flags.GetUint64("cpu-period") + cpuQuota, _ := flags.GetInt64("cpu-quota") + cpuShares, _ := flags.GetUint64("cpu-shares") + httpProxy, _ := flags.GetBool("http-proxy") + + ulimit := []string{} + if flags.Changed("ulimit") { + ulimit, _ = flags.GetStringSlice("ulimit") + } + + secrets, _ := flags.GetStringArray("secret") + sshsources, _ := flags.GetStringArray("ssh") + + commonOpts := &define.CommonBuildOptions{ + AddHost: addHost, + CPUPeriod: cpuPeriod, + CPUQuota: cpuQuota, + CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(), + CPUSetMems: findFlagFunc("cpuset-mems").Value.String(), + CPUShares: cpuShares, + CgroupParent: findFlagFunc("cgroup-parent").Value.String(), + DNSOptions: dnsOptions, + DNSSearch: dnsSearch, + DNSServers: dnsServers, + HTTPProxy: httpProxy, + Memory: memoryLimit, + MemorySwap: memorySwap, + ShmSize: findFlagFunc("shm-size").Value.String(), + Ulimit: ulimit, + Volumes: volumes, + Secrets: secrets, + SSHSources: sshsources, + } + securityOpts, _ := flags.GetStringArray("security-opt") + if err := parseSecurityOpts(securityOpts, commonOpts); err != nil { + return nil, err + } + return commonOpts, nil +} + +func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error { + for _, opt := range securityOpts { + if opt == "no-new-privileges" { + return errors.Errorf("no-new-privileges is not supported") + } + con := strings.SplitN(opt, "=", 2) + if len(con) != 2 { + return errors.Errorf("Invalid --security-opt name=value pair: %q", opt) + } + + switch con[0] { + case "label": + commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1]) + case "apparmor": + commonOpts.ApparmorProfile = con[1] + case "seccomp": + commonOpts.SeccompProfilePath = con[1] + default: + return errors.Errorf("Invalid --security-opt 2: %q", opt) + } + + } + + if commonOpts.SeccompProfilePath == "" { + if _, err := os.Stat(SeccompOverridePath); err == nil { + commonOpts.SeccompProfilePath = SeccompOverridePath + } else { + if !os.IsNotExist(err) { + return errors.WithStack(err) + } + if _, err := os.Stat(SeccompDefaultPath); err != nil { + if !os.IsNotExist(err) { + return errors.WithStack(err) + } + } else { + commonOpts.SeccompProfilePath = SeccompDefaultPath + } + } + } + return nil +} + +// Split string into slice by colon. Backslash-escaped colon (i.e. 
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + result := make([]string, 0, 3) + sb := &strings.Builder{} + for idx, r := range str { + if r == ':' { + // the colon is backslash-escaped + if idx-1 > 0 && str[idx-1] == '\\' { + sb.WriteRune(r) + } else { + // os.Stat will fail if path contains escaped colon + result = append(result, revertEscapedColon(sb.String())) + sb.Reset() + } + } else { + sb.WriteRune(r) + } + } + if sb.Len() > 0 { + result = append(result, revertEscapedColon(sb.String())) + } + return result +} + +// Convert "\:" to ":" +func revertEscapedColon(source string) string { + return strings.ReplaceAll(source, "\\:", ":") +} + +// Volume parses the input of --volume +func Volume(volume string) (specs.Mount, error) { + mount := specs.Mount{} + arr := SplitStringWithColonEscape(volume) + if len(arr) < 2 { + return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) + } + if err := validateVolumeMountHostDir(arr[0]); err != nil { + return mount, err + } + if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { + return mount, err + } + mountOptions := "" + if len(arr) > 2 { + mountOptions = arr[2] + if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { + return mount, err + } + } + mountOpts := strings.Split(mountOptions, ",") + mount.Source = arr[0] + mount.Destination = arr[1] + mount.Type = "rbind" + mount.Options = mountOpts + return mount, nil +} + +// Volumes validates the host and container paths passed in to the --volume flag +func Volumes(volumes []string) error { + if len(volumes) == 0 { + return nil + } + for _, volume := range volumes { + if _, err := Volume(volume); err != nil { + return err + } + } + return nil +} + +func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { + finalVolumeMounts := make(map[string]specs.Mount) + + for _, volume := range volumes { + volumeMount, err := Volume(volume) + if err != nil { + return nil, err + } + if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { + return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) + } + finalVolumeMounts[volumeMount.Destination] = volumeMount + } + return finalVolumeMounts, nil +} + +// GetVolumes gets the volumes from --volume and --mount +func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, error) { + unifiedMounts, mountedImages, err := getMounts(ctx, store, mounts, contextDir) + if err != nil { + return nil, mountedImages, err + } + volumeMounts, err := getVolumeMounts(volumes) + if err != nil { + return nil, mountedImages, err + } + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, mountedImages, errors.Wrapf(errDuplicateDest, dest) + } + unifiedMounts[dest] = mount + } + + finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + finalMounts = append(finalMounts, mount) + } + return finalMounts, mountedImages, nil +} + +// getMounts takes user-provided input from the --mount flag and creates OCI +// spec mounts. +// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=tmpfs,target=/dev/shm ... 
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, error) { + finalMounts := make(map[string]specs.Mount) + mountedImages := make([]string, 0) + + errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=,[src=,]target=[,options]") + + // TODO(vrothberg): the manual parsing can be replaced with a regular expression + // to allow a more robust parsing of the mount format and to give + // precise errors regarding supported format versus supported options. + for _, mount := range mounts { + arr := strings.SplitN(mount, ",", 2) + if len(arr) < 2 { + return nil, mountedImages, errors.Wrapf(errInvalidSyntax, "%q", mount) + } + kv := strings.Split(arr[0], "=") + // TODO: type is not explicitly required in Docker. + // If not specified, it defaults to "volume". + if len(kv) != 2 || kv[0] != "type" { + return nil, mountedImages, errors.Wrapf(errInvalidSyntax, "%q", mount) + } + + tokens := strings.Split(arr[1], ",") + switch kv[1] { + case TypeBind: + mount, image, err := internalParse.GetBindMount(ctx, tokens, contextDir, store, "", nil) + if err != nil { + return nil, mountedImages, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + mountedImages = append(mountedImages, image) + case TypeCache: + mount, err := internalParse.GetCacheMount(tokens, store, "", nil) + if err != nil { + return nil, mountedImages, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + case TypeTmpfs: + mount, err := internalParse.GetTmpfsMount(tokens) + if err != nil { + return nil, mountedImages, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + default: + return nil, mountedImages, errors.Errorf("invalid filesystem type %q", kv[1]) + } + } + + return finalMounts, mountedImages, nil +} + +// ValidateVolumeHostDir validates a volume mount's source directory +func ValidateVolumeHostDir(hostDir string) error { + return parse.ValidateVolumeHostDir(hostDir) +} + +// validates the host path of buildah --volume +func validateVolumeMountHostDir(hostDir string) error { + if !filepath.IsAbs(hostDir) { + return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) + } + if _, err := os.Stat(hostDir); err != nil { + return errors.WithStack(err) + } + return nil +} + +// ValidateVolumeCtrDir validates a volume mount's destination directory. +func ValidateVolumeCtrDir(ctrDir string) error { + return parse.ValidateVolumeCtrDir(ctrDir) +} + +// ValidateVolumeOpts validates a volume's options +func ValidateVolumeOpts(options []string) ([]string, error) { + return parse.ValidateVolumeOpts(options) +} + +// validateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). 
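+// Editor's illustration (hypothetical host names):
+//
+//	validateExtraHost("registry.local:10.0.0.5") // nil: name plus IPv4
+//	validateExtraHost("db:fe80::1")              // nil: only the first colon splits, so IPv6 works
+//	validateExtraHost("nohost")                  // error: missing the ":ip" part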
+// for add-host flag +func validateExtraHost(val string) error { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return errors.Errorf("bad format for add-host: %q", val) + } + if _, err := validateIPAddress(arr[1]); err != nil { + return errors.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return nil +} + +// validateIPAddress validates an Ip address. +// for dns, ip, and ip6 flags also +func validateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", errors.Errorf("%s is not an ip address", val) +} + +// SystemContextFromOptions returns a SystemContext populated with values +// per the input parameters provided by the caller for the use in authentication. +func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { + return SystemContextFromFlagSet(c.Flags(), c.Flag) +} + +// SystemContextFromFlagSet returns a SystemContext populated with values +// per the input parameters provided by the caller for the use in authentication. +func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) { + certDir, err := flags.GetString("cert-dir") + if err != nil { + certDir = "" + } + ctx := &types.SystemContext{ + DockerCertPath: certDir, + } + tlsVerify, err := flags.GetBool("tls-verify") + if err == nil && findFlagFunc("tls-verify").Changed { + ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify) + ctx.OCIInsecureSkipTLSVerify = !tlsVerify + ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify + } + disableCompression, err := flags.GetBool("disable-compression") + if err == nil { + if disableCompression { + ctx.OCIAcceptUncompressedLayers = true + } else { + ctx.DirForceCompress = true + } + } + creds, err := flags.GetString("creds") + if err == nil && findFlagFunc("creds").Changed { + var err error + ctx.DockerAuthConfig, err = AuthConfig(creds) + if err != nil { + return nil, err + } + } + sigPolicy, err := flags.GetString("signature-policy") + if err == nil && findFlagFunc("signature-policy").Changed { + ctx.SignaturePolicyPath = sigPolicy + } + authfile, err := flags.GetString("authfile") + if err == nil { + ctx.AuthFilePath = getAuthFile(authfile) + } + regConf, err := flags.GetString("registries-conf") + if err == nil && findFlagFunc("registries-conf").Changed { + ctx.SystemRegistriesConfPath = regConf + } + regConfDir, err := flags.GetString("registries-conf-dir") + if err == nil && findFlagFunc("registries-conf-dir").Changed { + ctx.RegistriesDirPath = regConfDir + } + shortNameAliasConf, err := flags.GetString("short-name-alias-conf") + if err == nil && findFlagFunc("short-name-alias-conf").Changed { + ctx.UserShortNameAliasConfPath = shortNameAliasConf + } + ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version) + if findFlagFunc("os") != nil && findFlagFunc("os").Changed { + var os string + if os, err = flags.GetString("os"); err != nil { + return nil, err + } + ctx.OSChoice = os + } + if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed { + var arch string + if arch, err = flags.GetString("arch"); err != nil { + return nil, err + } + ctx.ArchitectureChoice = arch + } + if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed { + var variant string + if variant, err = flags.GetString("variant"); err != nil { + return nil, err + } + 
ctx.VariantChoice = variant + } + if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed { + var specs []string + if specs, err = flags.GetStringSlice("platform"); err != nil { + return nil, err + } + if len(specs) == 0 || specs[0] == "" { + return nil, errors.Errorf("unable to parse --platform value %v", specs) + } + platform := specs[0] + os, arch, variant, err := Platform(platform) + if err != nil { + return nil, err + } + if ctx.OSChoice != "" || ctx.ArchitectureChoice != "" || ctx.VariantChoice != "" { + return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant") + } + ctx.OSChoice = os + ctx.ArchitectureChoice = arch + ctx.VariantChoice = variant + } + + ctx.BigFilesTemporaryDir = GetTempDir() + return ctx, nil +} + +func getAuthFile(authfile string) string { + if authfile != "" { + return authfile + } + return os.Getenv("REGISTRY_AUTH_FILE") +} + +// PlatformFromOptions parses the operating system (os) and architecture (arch) +// from the provided command line options. Deprecated in favor of +// PlatformsFromOptions(), but kept here because it's part of our API. +func PlatformFromOptions(c *cobra.Command) (os, arch string, err error) { + platforms, err := PlatformsFromOptions(c) + if err != nil { + return "", "", err + } + if len(platforms) < 1 { + return "", "", errors.Errorf("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])") + } + return platforms[0].OS, platforms[0].Arch, nil +} + +// PlatformsFromOptions parses the operating system (os) and architecture +// (arch) from the provided command line options. If --platform used, it +// also returns the list of platforms that were passed in as its argument. +func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Variant string }, err error) { + var os, arch, variant string + if c.Flag("os").Changed { + if os, err = c.Flags().GetString("os"); err != nil { + return nil, err + } + } + if c.Flag("arch").Changed { + if arch, err = c.Flags().GetString("arch"); err != nil { + return nil, err + } + } + if c.Flag("variant").Changed { + if variant, err = c.Flags().GetString("variant"); err != nil { + return nil, err + } + } + platforms = []struct{ OS, Arch, Variant string }{{os, arch, variant}} + if c.Flag("platform").Changed { + platforms = nil + platformSpecs, err := c.Flags().GetStringSlice("platform") + if err != nil { + return nil, errors.Wrap(err, "unable to parse platform") + } + if os != "" || arch != "" || variant != "" { + return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant") + } + for _, pf := range platformSpecs { + if os, arch, variant, err = Platform(pf); err != nil { + return nil, errors.Wrapf(err, "unable to parse platform %q", pf) + } + platforms = append(platforms, struct{ OS, Arch, Variant string }{os, arch, variant}) + } + } + return platforms, nil +} + +const platformSep = "/" + +// DefaultPlatform returns the standard platform for the current system +func DefaultPlatform() string { + return platforms.DefaultString() +} + +// Platform separates the platform string into os, arch and variant, +// accepting any of $arch, $os/$arch, or $os/$arch/$variant. 
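+// Editor's illustration:
+//
+//	os, arch, variant, _ := Platform("linux/arm64/v8") // "linux", "arm64", "v8"
+//	os, arch, variant, _ = Platform("linux/amd64")     // "linux", "amd64", ""
+//	os, arch, variant, _ = Platform("local")           // resolved via DefaultPlatform()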
+func Platform(platform string) (os, arch, variant string, err error) { + split := strings.Split(platform, platformSep) + switch len(split) { + case 3: + variant = split[2] + fallthrough + case 2: + arch = split[1] + os = split[0] + return + case 1: + if platform == "local" { + return Platform(DefaultPlatform()) + } + } + return "", "", "", errors.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform) +} + +func parseCreds(creds string) (string, string) { + if creds == "" { + return "", "" + } + up := strings.SplitN(creds, ":", 2) + if len(up) == 1 { + return up[0], "" + } + if up[0] == "" { + return "", up[1] + } + return up[0], up[1] +} + +// AuthConfig parses the creds in format [username[:password] into an auth +// config. +func AuthConfig(creds string) (*types.DockerAuthConfig, error) { + username, password := parseCreds(creds) + if username == "" { + fmt.Print("Username: ") + fmt.Scanln(&username) + } + if password == "" { + fmt.Print("Password: ") + termPassword, err := term.ReadPassword(0) + if err != nil { + return nil, errors.Wrapf(err, "could not read password from terminal") + } + password = string(termPassword) + } + + return &types.DockerAuthConfig{ + Username: username, + Password: password, + }, nil +} + +// IDMappingOptions parses the build options related to user namespaces and ID mapping. +func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { + return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) +} + +// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping. +func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { + user := findFlagFunc("userns-uid-map-user").Value.String() + group := findFlagFunc("userns-gid-map-group").Value.String() + // If only the user or group was specified, use the same value for the + // other, since we need both in order to initialize the maps using the + // names. + if user == "" && group != "" { + user = group + } + if group == "" && user != "" { + group = user + } + // Either start with empty maps or the name-based maps. + mappings := idtools.NewIDMappingsFromMaps(nil, nil) + if user != "" && group != "" { + submappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, err + } + mappings = submappings + } + globalOptions := persistentFlags + // We'll parse the UID and GID mapping options the same way. + buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) { + outmap := make([]specs.LinuxIDMapping, 0, len(basemap)) + // Start with the name-based map entries. + for _, m := range basemap { + outmap = append(outmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + // Parse the flag's value as one or more triples (if it's even + // been set), and append them. 
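+		// (Editor's illustration: a triple such as "0:1000:65536" maps
+		// container IDs 0-65535 to host IDs 1000-66535; parseIDMap below
+		// splits on any non-digit run, so "0:1000:65536,65536:0:1"
+		// parses as two triples.)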
+ var spec []string + if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed { + spec, _ = globalOptions.GetStringSlice(option) + } + if findFlagFunc(option).Changed { + spec, _ = flags.GetStringSlice(option) + } + idmap, err := parseIDMap(spec) + if err != nil { + return nil, err + } + for _, m := range idmap { + outmap = append(outmap, specs.LinuxIDMapping{ + ContainerID: m[0], + HostID: m[1], + Size: m[2], + }) + } + return outmap, nil + } + uidmap, err := buildIDMap(mappings.UIDs(), "userns-uid-map") + if err != nil { + return nil, nil, err + } + gidmap, err := buildIDMap(mappings.GIDs(), "userns-gid-map") + if err != nil { + return nil, nil, err + } + // If we only have one map or the other populated at this point, then + // use the same mapping for both, since we know that no user or group + // name was specified, but a specific mapping was for one or the other. + if len(uidmap) == 0 && len(gidmap) != 0 { + uidmap = gidmap + } + if len(gidmap) == 0 && len(uidmap) != 0 { + gidmap = uidmap + } + + // By default, having mappings configured means we use a user + // namespace. Otherwise, we don't. + usernsOption := define.NamespaceOption{ + Name: string(specs.UserNamespace), + Host: len(uidmap) == 0 && len(gidmap) == 0, + } + // If the user specifically requested that we either use or don't use + // user namespaces, override that default. + if findFlagFunc("userns").Changed { + how := findFlagFunc("userns").Value.String() + switch how { + case "", "container", "private": + usernsOption.Host = false + case "host": + usernsOption.Host = true + default: + how = strings.TrimPrefix(how, "ns:") + if _, err := os.Stat(how); err != nil { + return nil, nil, errors.Wrapf(err, "checking %s namespace", string(specs.UserNamespace)) + } + logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how) + usernsOption.Path = how + } + } + usernsOptions = define.NamespaceOptions{usernsOption} + + // If the user requested that we use the host namespace, but also that + // we use mappings, that's not going to work. + if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { + return nil, nil, errors.Errorf("can not specify ID mappings while using host's user namespace") + } + return usernsOptions, &define.IDMappingOptions{ + HostUIDMapping: usernsOption.Host, + HostGIDMapping: usernsOption.Host, + UIDMap: uidmap, + GIDMap: gidmap, + }, nil +} + +func parseIDMap(spec []string) (m [][3]uint32, err error) { + for _, s := range spec { + args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) }) + if len(args)%3 != 0 { + return nil, errors.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s) + } + for len(args) >= 3 { + cid, err := strconv.ParseUint(args[0], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing container ID %q from mapping %q as a number", args[0], s) + } + hostid, err := strconv.ParseUint(args[1], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing host ID %q from mapping %q as a number", args[1], s) + } + size, err := strconv.ParseUint(args[2], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing %q from mapping %q as a number", args[2], s) + } + m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)}) + args = args[3:] + } + } + return m, nil +} + +// NamespaceOptions parses the build options for all namespaces except for user namespace. 
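+// Editor's illustration: with "--network=host --pid=private" the result is
+//
+//	define.NamespaceOptions{
+//		{Name: "network", Host: true},
+//		{Name: "pid"},
+//	}
+//
+// and the returned network policy is define.NetworkEnabled.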
+func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { + return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// NamespaceOptionsFromFlagSet parses the build options for all namespaces except for user namespace. +func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { + options := make(define.NamespaceOptions, 0, 7) + policy := define.NetworkDefault + for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { + if flags.Lookup(what) != nil && findFlagFunc(what).Changed { + how := findFlagFunc(what).Value.String() + switch what { + case "cgroupns": + what = string(specs.CgroupNamespace) + } + switch how { + case "", "container", "private": + logrus.Debugf("setting %q namespace to %q", what, "") + policy = define.NetworkEnabled + options.AddOrReplace(define.NamespaceOption{ + Name: what, + }) + case "host": + logrus.Debugf("setting %q namespace to host", what) + policy = define.NetworkEnabled + options.AddOrReplace(define.NamespaceOption{ + Name: what, + Host: true, + }) + default: + if what == string(specs.NetworkNamespace) { + if how == "none" { + options.AddOrReplace(define.NamespaceOption{ + Name: what, + }) + policy = define.NetworkDisabled + logrus.Debugf("setting network to disabled") + break + } + } + how = strings.TrimPrefix(how, "ns:") + // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go + if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) { + if _, err := os.Stat(how); err != nil { + return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + } + } + policy = define.NetworkEnabled + logrus.Debugf("setting %q namespace to %q", what, how) + options.AddOrReplace(define.NamespaceOption{ + Name: what, + Path: how, + }) + } + } + } + return options, policy, nil +} + +func defaultIsolation() (define.Isolation, error) { + isolation, isSet := os.LookupEnv("BUILDAH_ISOLATION") + if isSet { + switch strings.ToLower(isolation) { + case "oci": + return define.IsolationOCI, nil + case "rootless": + return define.IsolationOCIRootless, nil + case "chroot": + return define.IsolationChroot, nil + default: + return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation) + } + } + if unshare.IsRootless() { + return define.IsolationOCIRootless, nil + } + return define.IsolationDefault, nil +} + +// IsolationOption parses the --isolation flag. 
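+// Editor's illustration:
+//
+//	IsolationOption("chroot")  // define.IsolationChroot
+//	IsolationOption("oci")     // define.IsolationOCI
+//	IsolationOption("")        // defers to $BUILDAH_ISOLATION, then rootless detection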
+func IsolationOption(isolation string) (define.Isolation, error) { + if isolation != "" { + switch strings.ToLower(isolation) { + case "oci", "default": + return define.IsolationOCI, nil + case "rootless": + return define.IsolationOCIRootless, nil + case "chroot": + return define.IsolationChroot, nil + default: + return 0, errors.Errorf("unrecognized isolation type %q", isolation) + } + } + return defaultIsolation() +} + +// Device parses device mapping string to a src, dest & permissions string +// Valid values for device look like: +// '/dev/sdc" +// '/dev/sdc:/dev/xvdc" +// '/dev/sdc:/dev/xvdc:rwm" +// '/dev/sdc:rm" +func Device(device string) (string, string, string, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + if !isValidDeviceMode(arr[2]) { + return "", "", "", errors.Errorf("invalid device mode: %s", arr[2]) + } + permissions = arr[2] + fallthrough + case 2: + if isValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + if len(arr[1]) == 0 || arr[1][0] != '/' { + return "", "", "", errors.Errorf("invalid device mode: %s", arr[1]) + } + dst = arr[1] + } + fallthrough + case 1: + if len(arr[0]) > 0 { + src = arr[0] + break + } + fallthrough + default: + return "", "", "", errors.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + return src, dst, permissions, nil +} + +// isValidDeviceMode checks if the mode for device is valid or not. +// isValid mode is a composition of r (read), w (write), and m (mknod). +func isValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +func GetTempDir() string { + if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { + return tmpdir + } + return "/var/tmp" +} + +// Secrets parses the --secret flag +func Secrets(secrets []string) (map[string]define.Secret, error) { + invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") + parsed := make(map[string]define.Secret) + for _, secret := range secrets { + tokens := strings.Split(secret, ",") + var id, src, typ string + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "id": + id = kv[1] + case "src": + src = kv[1] + case "env": + src = kv[1] + typ = "env" + case "type": + if kv[1] != "file" && kv[1] != "env" { + return nil, errors.New("invalid secret type, must be file or env") + } + typ = kv[1] + } + } + if id == "" { + return nil, invalidSyntax + } + if src == "" { + src = id + } + if typ == "" { + if _, ok := os.LookupEnv(id); ok { + typ = "env" + } else { + typ = "file" + } + } + + if typ == "file" { + fullPath, err := filepath.Abs(src) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + _, err = os.Stat(fullPath) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + src = fullPath + } + newSecret := define.Secret{ + Source: src, + SourceType: typ, + } + parsed[id] = newSecret + + } + return parsed, nil +} + +// SSH parses the --ssh flag +func SSH(sshSources []string) (map[string]*sshagent.Source, error) { + parsed := make(map[string]*sshagent.Source) + var paths []string + for _, v := range sshSources { + parts := strings.SplitN(v, "=", 2) + if len(parts) > 1 { + paths = strings.Split(parts[1], ",") + } + 
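+		// (Editor's illustration: "--ssh default" forwards the agent at
+		// $SSH_AUTH_SOCK, while "--ssh id=/home/user/.ssh/id_rsa" builds an
+		// in-memory agent from that key file; the id name and path here are
+		// hypothetical.)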
+ source, err := sshagent.NewSource(paths) + if err != nil { + return nil, err + } + parsed[parts[0]] = source + } + return parsed, nil +} + +func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) { + if path != "" { + excludes, err := imagebuilder.ParseIgnore(path) + return excludes, path, err + } + path = filepath.Join(contextDir, ".containerignore") + excludes, err := imagebuilder.ParseIgnore(path) + if os.IsNotExist(err) { + path = filepath.Join(contextDir, ".dockerignore") + excludes, err = imagebuilder.ParseIgnore(path) + } + if os.IsNotExist(err) { + return excludes, "", nil + } + return excludes, path, err +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go new file mode 100644 index 00000000000..8b11df33cf4 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -0,0 +1,50 @@ +// +build linux darwin + +package parse + +import ( + "os" + "path/filepath" + + "github.com/containers/buildah/define" + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/pkg/errors" +) + +func DeviceFromPath(device string) (define.ContainerDevices, error) { + var devs define.ContainerDevices + src, dst, permissions, err := Device(device) + if err != nil { + return nil, err + } + if unshare.IsRootless() && src != dst { + return nil, errors.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst) + } + srcInfo, err := os.Stat(src) + if err != nil { + return nil, errors.Wrapf(err, "error getting info of source device %s", src) + } + + if !srcInfo.IsDir() { + dev, err := devices.DeviceFromPath(src, permissions) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid device", src) + } + dev.Path = dst + devs = append(devs, *dev) + return devs, nil + } + + // If source device is a directory + srcDevices, err := devices.GetDevices(src) + if err != nil { + return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) + } + for _, d := range srcDevices { + d.Path = filepath.Join(dst, filepath.Base(d.Path)) + d.Permissions = devices.Permissions(permissions) + devs = append(devs, *d) + } + return devs, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go new file mode 100644 index 00000000000..c48b24884d3 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux,!darwin + +package parse + +import ( + "github.com/containers/buildah/define" + "github.com/pkg/errors" +) + +func getDefaultProcessLimits() []string { + return []string{} +} + +func DeviceFromPath(device string) (define.ContainerDevices, error) { + return nil, errors.Errorf("devices not supported") +} diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage.go new file mode 100644 index 00000000000..7b1226d249d --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage.go @@ -0,0 +1,48 @@ +package rusage + +import ( + "fmt" + "time" + + units "github.com/docker/go-units" +) + +// Rusage is a subset of a Unix-style resource usage counter for the current +// process and its children. 
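+// (Editor's note: a typical measurement pattern, illustrative only,
+//
+//	before, _ := rusage.Get()
+//	// ... run the step being measured ...
+//	after, _ := rusage.Get()
+//	fmt.Println(rusage.FormatDiff(after.Subtract(before)))
+//
+// where Subtract derives Elapsed from the two Date stamps.)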
The counters are always 0 on platforms where the +// system call is not available (i.e., systems where getrusage() doesn't +// exist). +type Rusage struct { + Date time.Time + Elapsed time.Duration + Utime, Stime time.Duration + Inblock, Outblock int64 +} + +// FormatDiff formats the result of rusage.Rusage.Subtract() for logging. +func FormatDiff(diff Rusage) string { + return fmt.Sprintf("%s(system) %s(user) %s(elapsed) %s input %s output", diff.Stime.Round(time.Millisecond), diff.Utime.Round(time.Millisecond), diff.Elapsed.Round(time.Millisecond), units.HumanSize(float64(diff.Inblock*512)), units.HumanSize(float64(diff.Outblock*512))) +} + +// Subtract subtracts the items in delta from r, and returns the difference. +// The Date field is zeroed for easier comparison with the zero value for the +// Rusage type. +func (r Rusage) Subtract(baseline Rusage) Rusage { + return Rusage{ + Elapsed: r.Date.Sub(baseline.Date), + Utime: r.Utime - baseline.Utime, + Stime: r.Stime - baseline.Stime, + Inblock: r.Inblock - baseline.Inblock, + Outblock: r.Outblock - baseline.Outblock, + } +} + +// Get returns the counters for the current process and its children, +// subtracting any values in the passed in "since" value, or an error. +// The Elapsed field will always be set to zero. +func Get() (Rusage, error) { + counters, err := get() + if err != nil { + return Rusage{}, err + } + return counters, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go new file mode 100644 index 00000000000..5bfed45c13a --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package rusage + +import ( + "syscall" + "time" + + "github.com/pkg/errors" +) + +func mkduration(tv syscall.Timeval) time.Duration { + return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond +} + +func get() (Rusage, error) { + var rusage syscall.Rusage + err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage) + if err != nil { + return Rusage{}, errors.Wrapf(err, "error getting resource usage") + } + r := Rusage{ + Date: time.Now(), + Utime: mkduration(rusage.Utime), + Stime: mkduration(rusage.Stime), + Inblock: int64(rusage.Inblock), // nolint: unconvert + Outblock: int64(rusage.Oublock), // nolint: unconvert + } + return r, nil +} + +// Supported returns true if resource usage counters are supported on this OS. +func Supported() bool { + return true +} diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go new file mode 100644 index 00000000000..031c8140247 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go @@ -0,0 +1,18 @@ +// +build windows + +package rusage + +import ( + "syscall" + + "github.com/pkg/errors" +) + +func get() (Rusage, error) { + return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage") +} + +// Supported returns true if resource usage counters are supported on this OS. 
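+// (Editor's note: the `// +build windows` constraint above pairs this stub
+// with the `// +build !windows` implementation in rusage_unix.go, so callers
+// can guard portably, e.g.
+//
+//	if rusage.Supported() {
+//		r, _ := rusage.Get()
+//		_ = r
+//	}
+//
+// without any platform switches of their own.)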
+func Supported() bool { + return false +} diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go new file mode 100644 index 00000000000..b985cd2b01a --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -0,0 +1,231 @@ +package sshagent + +import ( + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/opencontainers/selinux/go-selinux" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +// AgentServer is an ssh agent that can be served and shutdown at a later time +type AgentServer struct { + agent agent.Agent + wg sync.WaitGroup + conn *net.Conn + listener net.Listener + shutdown chan bool + servePath string + serveDir string +} + +// NewAgentServer creates a new agent on the host +func NewAgentServer(source *Source) (*AgentServer, error) { + if source.Keys != nil { + return newAgentServerKeyring(source.Keys) + } + return newAgentServerSocket(source.Socket) +} + +// newAgentServerKeyring creates a new agent from scratch and adds keys +func newAgentServerKeyring(keys []interface{}) (*AgentServer, error) { + a := agent.NewKeyring() + for _, k := range keys { + if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil { + return nil, errors.Wrap(err, "failed to create ssh agent") + } + } + return &AgentServer{ + agent: a, + shutdown: make(chan bool, 1), + }, nil +} + +// newAgentServerSocket creates a new agent from an existing agent on the host +func newAgentServerSocket(socketPath string) (*AgentServer, error) { + conn, err := net.Dial("unix", socketPath) + if err != nil { + return nil, err + } + a := &readOnlyAgent{agent.NewClient(conn)} + + return &AgentServer{ + agent: a, + conn: &conn, + shutdown: make(chan bool, 1), + }, nil + +} + +// Serve starts the SSH agent on the host and returns the path of the socket where the agent is serving +func (a *AgentServer) Serve(processLabel string) (string, error) { + err := selinux.SetSocketLabel(processLabel) + if err != nil { + return "", err + } + serveDir, err := ioutil.TempDir("", ".buildah-ssh-sock") + if err != nil { + return "", err + } + servePath := filepath.Join(serveDir, "ssh_auth_sock") + a.serveDir = serveDir + a.servePath = servePath + listener, err := net.Listen("unix", servePath) + if err != nil { + return "", err + } + err = selinux.SetSocketLabel("") + if err != nil { + return "", err + } + a.listener = listener + + go func() { + for { + //listener.Accept blocks + c, err := listener.Accept() + if err != nil { + select { + case <-a.shutdown: + return + default: + logrus.Errorf("error accepting SSH connection: %v", err) + continue + } + } + a.wg.Add(1) + go func() { + // agent.ServeAgent will only ever return with error, + err := agent.ServeAgent(a.agent, c) + if err != io.EOF { + logrus.Errorf("error serving agent: %v", err) + } + a.wg.Done() + }() + // the only way to get agent.ServeAgent is to close the connection it's serving on + // TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing connection. 
+			go func() {
+				time.Sleep(2000 * time.Millisecond)
+				c.Close()
+			}()
+		}
+	}()
+	return a.servePath, nil
+}
+
+// Shutdown shuts down the agent and closes the socket
+func (a *AgentServer) Shutdown() error {
+	if a.listener != nil {
+		a.shutdown <- true
+		a.listener.Close()
+	}
+	if a.conn != nil {
+		conn := *a.conn
+		conn.Close()
+	}
+	a.wg.Wait()
+	err := os.RemoveAll(a.serveDir)
+	if err != nil {
+		return err
+	}
+	a.serveDir = ""
+	a.servePath = ""
+	return nil
+}
+
+// ServePath returns the path where the agent is serving
+func (a *AgentServer) ServePath() string {
+	return a.servePath
+}
+
+// readOnlyAgent and its functions are originally from github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go
+
+// readOnlyAgent implements the agent.Agent interface.
+// readOnlyAgent allows reads only, to prevent keys from being added from the build to the forwarded ssh agent on the host.
+type readOnlyAgent struct {
+	agent.Agent
+}
+
+func (a *readOnlyAgent) Add(_ agent.AddedKey) error {
+	return errors.New("adding new keys not allowed by buildah")
+}
+
+func (a *readOnlyAgent) Remove(_ ssh.PublicKey) error {
+	return errors.New("removing keys not allowed by buildah")
+}
+
+func (a *readOnlyAgent) RemoveAll() error {
+	return errors.New("removing keys not allowed by buildah")
+}
+
+func (a *readOnlyAgent) Lock(_ []byte) error {
+	return errors.New("locking agent not allowed by buildah")
+}
+
+// Source describes the forwarded agent's source.
+// The source of the forwarded agent can be a socket on the host, or individual key files.
type Source struct {
+	Socket string
+	Keys   []interface{}
+}
+
+// NewSource takes paths, checks whether they are keys or sockets, and creates a Source.
+func NewSource(paths []string) (*Source, error) {
+	var keys []interface{}
+	var socket string
+	if len(paths) == 0 {
+		socket = os.Getenv("SSH_AUTH_SOCK")
+		if socket == "" {
+			return nil, errors.New("$SSH_AUTH_SOCK not set")
+		}
+	}
+	for _, p := range paths {
+		if socket != "" {
+			return nil, errors.New("only one socket is allowed")
+		}
+
+		fi, err := os.Stat(p)
+		if err != nil {
+			return nil, err
+		}
+		if fi.Mode()&os.ModeSocket > 0 {
+			if len(keys) == 0 {
+				socket = p
+			} else {
+				return nil, errors.New("cannot mix keys and socket file")
+			}
+			continue
+		}
+
+		f, err := os.Open(p)
+		if err != nil {
+			return nil, err
+		}
+		dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
+		if err != nil {
+			return nil, err
+		}
+
+		k, err := ssh.ParseRawPrivateKey(dt)
+		if err != nil {
+			return nil, errors.Wrapf(err, "cannot parse ssh key")
+		}
+		keys = append(keys, k)
+	}
+	if socket != "" {
+		return &Source{
+			Socket: socket,
+		}, nil
+	}
+	return &Source{
+		Keys: keys,
+	}, nil
+}
diff --git a/vendor/github.com/containers/buildah/pkg/util/util.go b/vendor/github.com/containers/buildah/pkg/util/util.go
new file mode 100644
index 00000000000..209ad9544a8
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/util/util.go
@@ -0,0 +1,81 @@
+package util
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// MirrorToTempFileIfPathIsDescriptor mirrors the path to a tmpfile if the
+// path points to a file descriptor instead of an actual file on the filesystem.
+// Reason: operations on file descriptors can lead
+// to edge cases where the content on the FD is not in a consumable
+// state after the first consumption.
+// It returns the path as a string and a bool confirming whether a temp file
+// was created and needs to be cleaned up.
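+//
+// Editor's illustration (hypothetical path):
+//
+//	path, isTemp := MirrorToTempFileIfPathIsDescriptor("/dev/fd/3")
+//	if isTemp {
+//		defer os.Remove(path) // the mirrored copy belongs to the caller
+//	}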
+func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) { + // one use-case is discussed here + // https://github.com/containers/buildah/issues/3070 + if !strings.HasPrefix(file, "/dev/fd") { + return file, false + } + b, err := ioutil.ReadFile(file) + if err != nil { + // if anything goes wrong return original path + return file, false + } + tmpfile, err := ioutil.TempFile(os.TempDir(), "buildah-temp-file") + if err != nil { + return file, false + } + if _, err := tmpfile.Write(b); err != nil { + // if anything goes wrong return original path + return file, false + } + + return tmpfile.Name(), true +} + +// DiscoverContainerfile tries to find a Containerfile or a Dockerfile within the provided `path`. +func DiscoverContainerfile(path string) (foundCtrFile string, err error) { + // Test for existence of the file + target, err := os.Stat(path) + if err != nil { + return "", errors.Wrap(err, "discovering Containerfile") + } + + switch mode := target.Mode(); { + case mode.IsDir(): + // If the path is a real directory, we assume a Containerfile or a Dockerfile within it + ctrfile := filepath.Join(path, "Containerfile") + + // Test for existence of the Containerfile file + file, err := os.Stat(ctrfile) + if err != nil { + // See if we have a Dockerfile within it + ctrfile = filepath.Join(path, "Dockerfile") + + // Test for existence of the Dockerfile file + file, err = os.Stat(ctrfile) + if err != nil { + return "", errors.Wrap(err, "cannot find Containerfile or Dockerfile in context directory") + } + } + + // The file exists, now verify the correct mode + if mode := file.Mode(); mode.IsRegular() { + foundCtrFile = ctrfile + } else { + return "", errors.Errorf("assumed Containerfile %q is not a file", ctrfile) + } + + case mode.IsRegular(): + // If the context dir is a file, we assume this as Containerfile + foundCtrFile = path + } + + return foundCtrFile, nil +} diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go new file mode 100644 index 00000000000..7149ac98631 --- /dev/null +++ b/vendor/github.com/containers/buildah/pull.go @@ -0,0 +1,96 @@ +package buildah + +import ( + "context" + "io" + "time" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/pkg/blobcache" + "github.com/containers/common/libimage" + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage" + "github.com/pkg/errors" +) + +// PullOptions can be used to alter how an image is copied in from somewhere. +type PullOptions struct { + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to log the writing + // of the new image. + ReportWriter io.Writer + // Store is the local storage store which holds the source image. + Store storage.Store + // github.com/containers/image/types SystemContext to hold credentials + // and other authentication/authorization information. + SystemContext *types.SystemContext + // BlobDirectory is the name of a directory in which we'll attempt to + // store copies of layer blobs that we pull down, if any. 
It should + // already exist. + BlobDirectory string + // AllTags is a boolean value that determines if all tagged images + // will be downloaded from the repository. The default is false. + AllTags bool + // RemoveSignatures causes any existing signatures for the image to be + // discarded when pulling it. + RemoveSignatures bool + // MaxRetries is the maximum number of attempts we'll make to pull any + // one image from the external registry if the first attempt fails. + MaxRetries int + // RetryDelay is how long to wait before retrying a pull attempt. + RetryDelay time.Duration + // OciDecryptConfig contains the config that can be used to decrypt an image if it is + // encrypted if non-nil. If nil, it does not attempt to decrypt an image. + OciDecryptConfig *encconfig.DecryptConfig + // PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever. + PullPolicy define.PullPolicy +} + +// Pull copies the contents of the image from somewhere else to local storage. Returns the +// ID of the local image or an error. +func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) { + libimageOptions := &libimage.PullOptions{} + libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath + libimageOptions.Writer = options.ReportWriter + libimageOptions.RemoveSignatures = options.RemoveSignatures + libimageOptions.OciDecryptConfig = options.OciDecryptConfig + libimageOptions.AllTags = options.AllTags + libimageOptions.RetryDelay = &options.RetryDelay + + if options.MaxRetries > 0 { + retries := uint(options.MaxRetries) + libimageOptions.MaxRetries = &retries + } + + if options.BlobDirectory != "" { + libimageOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) + } + + pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String()) + if err != nil { + return "", err + } + + runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) + if err != nil { + return "", err + } + + pulledImages, err := runtime.Pull(context.Background(), imageName, pullPolicy, libimageOptions) + if err != nil { + return "", err + } + + if len(pulledImages) == 0 { + return "", errors.Errorf("internal error pulling %s: no image pulled and no error", imageName) + } + + return pulledImages[0].ID(), nil +} diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go new file mode 100644 index 00000000000..cc915469d99 --- /dev/null +++ b/vendor/github.com/containers/buildah/push.go @@ -0,0 +1,135 @@ +package buildah + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/containers/buildah/pkg/blobcache" + "github.com/containers/common/libimage" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// PushOptions can be used to alter how an image is copied somewhere. +type PushOptions struct { + // Compression specifies the type of compression which is applied to + // layer blobs. 
The default is to not use compression, but + archive.Gzip is recommended. + // OBSOLETE: Use CompressionFormat instead. + Compression archive.Compression + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to log the writing + // of the new image. + ReportWriter io.Writer + // Store is the local storage store which holds the source image. + Store storage.Store + // github.com/containers/image/types SystemContext to hold credentials + // and other authentication/authorization information. + SystemContext *types.SystemContext + // ManifestType is the format to use + // possible options are oci, v2s1, and v2s2 + ManifestType string + // BlobDirectory is the name of a directory in which we'll look for + // prebuilt copies of layer blobs that we might otherwise need to + // regenerate from on-disk layers, substituting them in the list of + // blobs to copy whenever possible. + BlobDirectory string + // Quiet is a boolean value that determines if minimal output to + // the user will be displayed; this is best used for logging. + // The default is false. + Quiet bool + // SignBy is the fingerprint of a GPG key to use for signing the image. + SignBy string + // RemoveSignatures causes any existing signatures for the image to be + // discarded for the pushed copy. + RemoveSignatures bool + // MaxRetries is the maximum number of attempts we'll make to push any + // one image to the external registry if the first attempt fails. + MaxRetries int + // RetryDelay is how long to wait before retrying a push attempt. + RetryDelay time.Duration + // OciEncryptConfig when non-nil indicates that an image should be encrypted. + // The encryption options are derived from the construction of the EncryptConfig object. + OciEncryptConfig *encconfig.EncryptConfig + // OciEncryptLayers represents the list of layers to encrypt. + // If nil, don't encrypt any layers. + // If non-nil and len==0, all layers will be encrypted. + // Integers in the slice represent 0-indexed layer indices, with support for negative + // indexing; i.e., 0 is the first layer, -1 is the last (top-most) layer. + OciEncryptLayers *[]int + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int +} + +// Push copies the contents of the image to a new location.
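Before the implementation that follows, a hedged usage sketch of Push: the store comes from containers/storage and the destination from a transport-qualified name. The registry host and image name here are stand-ins.

package main

import (
	"context"
	"log"
	"time"

	"github.com/containers/buildah"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.StoreOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer store.Shutdown(false)

	// "docker://" selects the registry transport; registry.example.com is hypothetical.
	dest, err := alltransports.ParseImageName("docker://registry.example.com/myimage:latest")
	if err != nil {
		log.Fatal(err)
	}
	ref, dgst, err := buildah.Push(context.TODO(), "myimage:latest", dest, buildah.PushOptions{
		Store:      store,
		MaxRetries: 3,
		RetryDelay: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pushed %s (%s)", ref, dgst)
}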
+func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) { + libimageOptions := &libimage.PushOptions{} + libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath + libimageOptions.Writer = options.ReportWriter + libimageOptions.ManifestMIMEType = options.ManifestType + libimageOptions.SignBy = options.SignBy + libimageOptions.RemoveSignatures = options.RemoveSignatures + libimageOptions.RetryDelay = &options.RetryDelay + libimageOptions.OciEncryptConfig = options.OciEncryptConfig + libimageOptions.OciEncryptLayers = options.OciEncryptLayers + libimageOptions.CompressionFormat = options.CompressionFormat + libimageOptions.CompressionLevel = options.CompressionLevel + libimageOptions.PolicyAllowStorage = true + + if options.Quiet { + libimageOptions.Writer = nil + } + + if options.BlobDirectory != "" { + compress := types.PreserveOriginal + if options.Compression == archive.Gzip { + compress = types.Compress + } + libimageOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, compress) + } + + runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) + if err != nil { + return nil, "", err + } + + destString := fmt.Sprintf("%s:%s", dest.Transport().Name(), dest.StringWithinTransport()) + manifestBytes, err := runtime.Push(ctx, image, destString, libimageOptions) + if err != nil { + return nil, "", err + } + + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest)) + } + + var ref reference.Canonical + if name := dest.DockerReference(); name != nil { + ref, err = reference.WithDigest(name, manifestDigest) + if err != nil { + logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err) + } + } + + return ref, manifestDigest, nil +} diff --git a/vendor/github.com/containers/buildah/release.sh b/vendor/github.com/containers/buildah/release.sh new file mode 100644 index 00000000000..007f238d880 --- /dev/null +++ b/vendor/github.com/containers/buildah/release.sh @@ -0,0 +1,107 @@ +#!/bin/sh +# +# Cut a buildah release. Usage: +# +# $ hack/release.sh +# +# For example: +# +# $ hack/release.sh 1.2.3 1.3.0 +# +# for "I'm cutting 1.2.3, and want to use 1.3.0-dev for future work". 
+ +VERSION="$1" +NEXT_VERSION="$2" +DATE=$(date '+%Y-%m-%d') +LAST_TAG=$(git describe --tags --abbrev=0) + +write_go_version() +{ + LOCAL_VERSION="$1" + sed -i "s/^\(.*Version = \"\).*/\1${LOCAL_VERSION}\"/" define/types.go +} + +write_spec_version() +{ + LOCAL_VERSION="$1" + sed -i "s/^\(Version: *\).*/\1${LOCAL_VERSION}/" contrib/rpm/buildah.spec +} + +write_spec_changelog() +{ + sed '/\*.*-dev-1/d' -i ./contrib/rpm/buildah.spec + VERSION=$1 + date=$(date "+%a %b %d, %Y") + name=$(getent passwd $USERNAME | cut -d ':' -f 5) + echo "* ${date} ${name} <${USER}@redhat.com> ${VERSION}-1" >.changelog.txt + if [[ "${VERSION}" != *-dev ]]; then + git log --no-merges --format='- %s' "${LAST_TAG}..HEAD" >>.changelog.txt + else + echo "" >>.changelog.txt + fi + sed '/^%changelog.*/r .changelog.txt' -i ./contrib/rpm/buildah.spec + rm -f .changelog.txt +} + +write_makefile_epoch() +{ + LOCAL_EPOCH="$1" + sed -i "s/^\(EPOCH_TEST_COMMIT ?= \).*/\1${LOCAL_EPOCH}/" Makefile +} + +write_changelog() +{ + echo "- Changelog for v${VERSION} (${DATE})" >.changelog.txt && + git log --no-merges --format=' * %s' "${LAST_TAG}..HEAD" >>.changelog.txt && + echo >>.changelog.txt && + cat changelog.txt >>.changelog.txt && + mv -f .changelog.txt changelog.txt + + echo " +## v${VERSION} (${DATE}) +" >.CHANGELOG.md && + git log --no-merges --format=' %s' "${LAST_TAG}..HEAD" >>.CHANGELOG.md && + sed -i -e '/# Changelog/r .CHANGELOG.md' CHANGELOG.md && + rm -f .CHANGELOG.md +} + +release_commit() +{ + write_go_version "${VERSION}" && + write_spec_version "${VERSION}" && + write_spec_changelog "${VERSION}" && + write_changelog && + git commit -asm "Bump to v${VERSION} + +[NO TESTS NEEDED] +" +} + +dev_version_commit() +{ + write_go_version "${NEXT_VERSION}-dev" && + write_spec_version "${NEXT_VERSION}-dev" && + write_spec_changelog "${NEXT_VERSION}-dev" && + git commit -asm "Bump to v${NEXT_VERSION}-dev + +[NO TESTS NEEDED] +" +} + +epoch_commit() +{ + LOCAL_EPOCH="$1" + write_makefile_epoch "${LOCAL_EPOCH}" && + git commit -asm 'Bump gitvalidation epoch + + [NO TESTS NEEDED] +' +} + +git fetch origin && +git checkout -b "bump-${VERSION}" origin/main && +EPOCH=$(git rev-parse HEAD) && +release_commit && +git tag -s -m "version ${VERSION}" "v${VERSION}" && +dev_version_commit && +epoch_commit "${EPOCH}" diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go new file mode 100644 index 00000000000..ae390727800 --- /dev/null +++ b/vendor/github.com/containers/buildah/run.go @@ -0,0 +1,176 @@ +package buildah + +import ( + "fmt" + "io" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/image/v5/types" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +const ( + // runUsingRuntimeCommand is a command we use as a key for reexec + runUsingRuntimeCommand = define.Package + "-oci-runtime" +) + +// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal. +type TerminalPolicy int + +const ( + // DefaultTerminal indicates that this Run invocation should be + // connected to a pseudoterminal if we're connected to a terminal. + DefaultTerminal TerminalPolicy = iota + // WithoutTerminal indicates that this Run invocation should NOT be + // connected to a pseudoterminal. + WithoutTerminal + // WithTerminal indicates that this Run invocation should be connected + // to a pseudoterminal. 
+ WithTerminal +) + +// String converts a TerminalPolicy into a string. +func (t TerminalPolicy) String() string { + switch t { + case DefaultTerminal: + return "DefaultTerminal" + case WithoutTerminal: + return "WithoutTerminal" + case WithTerminal: + return "WithTerminal" + } + return fmt.Sprintf("unrecognized terminal setting %d", t) +} + +// NamespaceOption controls how we set up a namespace when launching processes. +type NamespaceOption = define.NamespaceOption + +// NamespaceOptions provides some helper methods for a slice of NamespaceOption +// structs. +type NamespaceOptions = define.NamespaceOptions + +// IDMappingOptions controls how we set up UID/GID mapping when we set up a +// user namespace. +type IDMappingOptions = define.IDMappingOptions + +// Isolation provides a way to specify whether we're supposed to use a proper +// OCI runtime, or some other method for running commands. +type Isolation = define.Isolation + +const ( + // IsolationDefault is whatever we think will work best. + IsolationDefault = define.IsolationDefault + // IsolationOCI is a proper OCI runtime. + IsolationOCI = define.IsolationOCI + // IsolationChroot is a more chroot-like environment: less isolation, + // but with fewer requirements. + IsolationChroot = define.IsolationChroot + // IsolationOCIRootless is a proper OCI runtime in rootless mode. + IsolationOCIRootless = define.IsolationOCIRootless +) + +// RunOptions can be used to alter how a command is run in the container. +type RunOptions struct { + // Logger is the logrus logger to write log messages with + Logger *logrus.Logger `json:"-"` + // Hostname is the hostname we set for the running container. + Hostname string + // Isolation is either IsolationDefault, IsolationOCI, IsolationChroot, or IsolationOCIRootless. + Isolation define.Isolation + // Runtime is the name of the runtime to run. It should accept the + // same arguments that runc does, and produce similar output. + Runtime string + // Args adds global arguments for the runtime. + Args []string + // NoPivot adds the --no-pivot runtime flag. + NoPivot bool + // Mounts are additional mount points which we want to provide. + Mounts []specs.Mount + // Env is additional environment variables to set. + Env []string + // User is the user as whom to run the command. + User string + // WorkingDir is an override for the working directory. + WorkingDir string + // ContextDir is used as the root directory for the source location for mounts that are of type "bind". + ContextDir string + // Shell is default shell to run in a container. + Shell string + // Cmd is an override for the configured default command. + Cmd []string + // Entrypoint is an override for the configured entry point. + Entrypoint []string + // NamespaceOptions controls how we set up the namespaces for the process. + NamespaceOptions define.NamespaceOptions + // ConfigureNetwork controls whether or not network interfaces and + // routing are configured for a new network namespace (i.e., when not + // joining another's namespace and not just using the host's + // namespace), effectively deciding whether or not the process has a + // usable network. + ConfigureNetwork define.NetworkConfigurationPolicy + // CNIPluginPath is the location of CNI plugin helpers, if they should be + // run from a location other than the default location. + CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. 
+ CNIConfigDir string + // Terminal provides a way to specify whether or not the command should + // be run with a pseudoterminal. By default (DefaultTerminal), a + // terminal is used if os.Stdout is connected to a terminal, but that + // decision can be overridden by specifying either WithTerminal or + // WithoutTerminal. + Terminal TerminalPolicy + // TerminalSize provides a way to set the number of rows and columns in + // a pseudo-terminal, if we create one, and Stdin/Stdout/Stderr aren't + // connected to a terminal. + TerminalSize *specs.Box + // The stdin/stdout/stderr descriptors to use. If set to nil, the + // corresponding files in the "os" package are used as defaults. + Stdin io.Reader `json:"-"` + Stdout io.Writer `json:"-"` + Stderr io.Writer `json:"-"` + // Quiet tells the run to turn off output to stdout. + Quiet bool + // AddCapabilities is a list of capabilities to add to the default set. + AddCapabilities []string + // DropCapabilities is a list of capabilities to remove from the default set, + // after processing the AddCapabilities set. If a capability appears in both + // lists, it will be dropped. + DropCapabilities []string + // Devices are the additional devices to add to the container + Devices define.ContainerDevices + // Secrets are the available secrets to use in a RUN + Secrets map[string]define.Secret + // SSHSources are the available ssh agents to use in a RUN + SSHSources map[string]*sshagent.Source `json:"-"` + // RunMounts are mounts for this run. RunMounts for this run + // will not show up in subsequent runs. + RunMounts []string + // Map of stages and container mountpoints, if any, from the stage executor + StageMountPoints map[string]internal.StageMountDetails + // External Image mounts to be cleaned up. + // Buildah run --mount can mount images before RUN calls; RUN can clean + // them up as well + ExternalImageMounts []string + // System context of current build + SystemContext *types.SystemContext + // CgroupManager to use for running OCI containers + CgroupManager string +} + +// RunMountArtifacts are the artifacts created when using a run mount.
+type runMountArtifacts struct { + // RunMountTargets are the run mount targets inside the container + RunMountTargets []string + // TmpFiles are artifacts that need to be removed outside the container + TmpFiles []string + // Any external images which were mounted inside container + MountedImages []string + // Agents are the ssh agents started + Agents []*sshagent.AgentServer + // SSHAuthSock is the path to the ssh auth sock inside the container + SSHAuthSock string +} diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go new file mode 100644 index 00000000000..eb9adbfb6b1 --- /dev/null +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -0,0 +1,2871 @@ +//go:build linux +// +build linux + +package buildah + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/containers/buildah/bind" + "github.com/containers/buildah/chroot" + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + internalParse "github.com/containers/buildah/internal/parse" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/pkg/overlay" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/network" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/capabilities" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/common/pkg/chown" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/subscriptions" + imagetypes "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/unshare" + storagetypes "github.com/containers/storage/types" + "github.com/docker/go-units" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + "golang.org/x/term" +) + +// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures. +type ContainerDevices define.ContainerDevices + +func setChildProcess() error { + if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil { + fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err) + return err + } + return nil +} + +// Run runs the specified command in the container's root filesystem. +func (b *Builder) Run(command []string, options RunOptions) error { + p, err := ioutil.TempDir("", define.Package) + if err != nil { + return err + } + // On some hosts like AH, /tmp is a symlink and we need an + // absolute path. 
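The comment above flags a portability point that the first lines of Run act on: ioutil.TempDir can hand back a path under a symlinked /tmp, and later bind-mount sources must be real absolute paths. A standalone restatement of the pattern, stdlib only:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	// Create the bundle directory, then resolve any symlinks in its path
	// (e.g. /tmp -> /private/tmp) before handing it to mount logic.
	p, err := ioutil.TempDir("", "bundle")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(p)
	path, err := filepath.EvalSymlinks(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(path)
}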
+ path, err := filepath.EvalSymlinks(p) + if err != nil { + return err + } + logrus.Debugf("using %q to hold bundle data", path) + defer func() { + if err2 := os.RemoveAll(path); err2 != nil { + options.Logger.Error(err2) + } + }() + + gp, err := generate.New("linux") + if err != nil { + return errors.Wrapf(err, "error generating new 'linux' runtime spec") + } + g := &gp + + isolation := options.Isolation + if isolation == define.IsolationDefault { + isolation = b.Isolation + if isolation == define.IsolationDefault { + isolation = define.IsolationOCI + } + } + if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil { + return err + } + + // hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf + b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}) + + if b.CommonBuildOpts == nil { + return errors.Errorf("Invalid format on container you must recreate the container") + } + + if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil { + return err + } + + if options.WorkingDir != "" { + g.SetProcessCwd(options.WorkingDir) + } else if b.WorkDir() != "" { + g.SetProcessCwd(b.WorkDir()) + } + setupSelinux(g, b.ProcessLabel, b.MountLabel) + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return errors.Wrapf(err, "error mounting container %q", b.ContainerID) + } + defer func() { + if err := b.Unmount(); err != nil { + options.Logger.Errorf("error unmounting container: %v", err) + } + }() + g.SetRootPath(mountPoint) + if len(command) > 0 { + command = runLookupPath(g, command) + g.SetProcessArgs(command) + } else { + g.SetProcessArgs(nil) + } + + for _, d := range b.Devices { + sDev := spec.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: &d.FileMode, + UID: &d.Uid, + GID: &d.Gid, + } + g.AddDevice(sDev) + g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions)) + } + + setupMaskedPaths(g) + setupReadOnlyPaths(g) + + setupTerminal(g, options.Terminal, options.TerminalSize) + + configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options) + if err != nil { + return err + } + + // rootless and networks are not supported + if len(configureNetworks) > 0 && isolation == IsolationOCIRootless { + return errors.New("cannot use networks as rootless") + } + + homeDir, err := b.configureUIDGID(g, mountPoint, options) + if err != nil { + return err + } + + g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile) + + // Now grab the spec from the generator. Set the generator to nil so that future contributors + // will quickly be able to tell that they're supposed to be modifying the spec directly from here. + spec := g.Config + g = nil + + // Set the seccomp configuration using the specified profile name. Some syscalls are + // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot), + // so we sorted out the capabilities lists first. + if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil { + return err + } + + // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. 
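The isolation handling near the top of Run above falls back in two steps: an explicit request wins, then the builder's configured isolation, then plain OCI. Restated as a small helper for clarity; this is a sketch, not part of buildah's API:

package main

import (
	"fmt"

	"github.com/containers/buildah/define"
)

// effectiveIsolation restates Run's fallback chain for the isolation mode.
func effectiveIsolation(requested, builderDefault define.Isolation) define.Isolation {
	if requested == define.IsolationDefault {
		requested = builderDefault
	}
	if requested == define.IsolationDefault {
		requested = define.IsolationOCI
	}
	return requested
}

func main() {
	fmt.Println(effectiveIsolation(define.IsolationDefault, define.IsolationChroot))
}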
+ rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + mode := os.FileMode(0755) + coptions := copier.MkdirOptions{ + ChownNew: rootIDPair, + ChmodNew: &mode, + } + if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil { + return err + } + + bindFiles := make(map[string]string) + namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...) + volumes := b.Volumes() + + if !contains(volumes, "/etc/hosts") { + hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair) + if err != nil { + return err + } + // Only bind /etc/hosts if there's a network + if options.ConfigureNetwork != define.NetworkDisabled { + bindFiles["/etc/hosts"] = hostFile + } + } + + if !(contains(volumes, "/etc/resolv.conf") || (len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none")) { + resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, namespaceOptions) + if err != nil { + return err + } + // Only bind /etc/resolv.conf if there's a network + if options.ConfigureNetwork != define.NetworkDisabled { + bindFiles["/etc/resolv.conf"] = resolvFile + } + } + // Empty file, so no need to recreate if it exists + if _, ok := bindFiles["/run/.containerenv"]; !ok { + containerenvPath := filepath.Join(path, "/run/.containerenv") + if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil { + return err + } + + rootless := 0 + if unshare.IsRootless() { + rootless = 1 + } + // Populate the .containerenv with container information + containerenv := fmt.Sprintf(`\ +engine="buildah-%s" +name=%q +id=%q +image=%q +imageid=%q +rootless=%d +`, define.Version, b.Container, b.ContainerID, b.FromImage, b.FromImageID, rootless) + + if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil { + return err + } + if err := label.Relabel(containerenvPath, b.MountLabel, false); err != nil { + return err + } + + bindFiles["/run/.containerenv"] = containerenvPath + } + runArtifacts, err := b.setupMounts(options.SystemContext, mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.SSHSources, options.RunMounts, options.ContextDir, options.StageMountPoints) + if err != nil { + return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID) + } + if runArtifacts.SSHAuthSock != "" { + sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock + spec.Process.Env = append(spec.Process.Env, sshenv) + } + + // following run was called from `buildah run` + // and some images were mounted for this run + // add them to cleanup artifacts + if len(options.ExternalImageMounts) > 0 { + runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...) 
+ } + + defer func() { + if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil { + options.Logger.Errorf("unable to cleanup run mounts %v", err) + } + }() + + defer b.cleanupTempVolumes() + + switch isolation { + case define.IsolationOCI: + var moreCreateArgs []string + if options.NoPivot { + moreCreateArgs = []string{"--no-pivot"} + } else { + moreCreateArgs = nil + } + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path)) + case IsolationChroot: + err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr) + case IsolationOCIRootless: + moreCreateArgs := []string{"--no-new-keyring"} + if options.NoPivot { + moreCreateArgs = append(moreCreateArgs, "--no-pivot") + } + if err := setupRootlessSpecChanges(spec, path, b.CommonBuildOpts.ShmSize); err != nil { + return err + } + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path)) + default: + err = errors.Errorf("don't know how to run this command") + } + return err +} + +func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error { + // Resources - CPU + if commonOpts.CPUPeriod != 0 { + g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) + } + if commonOpts.CPUQuota != 0 { + g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota) + } + if commonOpts.CPUShares != 0 { + g.SetLinuxResourcesCPUShares(commonOpts.CPUShares) + } + if commonOpts.CPUSetCPUs != "" { + g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs) + } + if commonOpts.CPUSetMems != "" { + g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) + } + + // Resources - Memory + if commonOpts.Memory != 0 { + g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) + } + if commonOpts.MemorySwap != 0 { + g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) + } + + // cgroup membership + if commonOpts.CgroupParent != "" { + g.SetLinuxCgroupsPath(commonOpts.CgroupParent) + } + + defaultContainerConfig, err := config.Default() + if err != nil { + return errors.Wrapf(err, "failed to get container config") + } + // Other process resource limits + if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil { + return err + } + + logrus.Debugf("Resources: %#v", commonOpts) + return nil +} + +func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) { + var mounts []specs.Mount + hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID} + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. + for _, volume := range builtinVolumes { + volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex()) + initializeVolume := false + // If we need to, create the directory that we'll use to hold + // the volume contents. If we do need to create it, then we'll + // need to populate it, too, so make a note of that. 
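The volumePath construction above gives each built-in volume a stable, collision-free directory name by hashing the mount path with go-digest. A sketch of the same scheme; the containerDir value is hypothetical:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Each built-in volume maps to a directory named after the SHA-256
	// of its in-container path, so the same volume always lands in the
	// same place under the container's persistent directory.
	containerDir := "/var/lib/containers/storage/overlay-containers/abc/userdata" // illustrative
	volume := "/var/lib/mysql"
	p := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
	fmt.Println(p)
}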
+ if _, err := os.Stat(volumePath); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume) + if err = os.MkdirAll(volumePath, 0755); err != nil { + return nil, err + } + if err = label.Relabel(volumePath, mountLabel, false); err != nil { + return nil, err + } + initializeVolume = true + } + // Make sure the volume exists in the rootfs and read its attributes. + createDirPerms := os.FileMode(0755) + err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{ + ChownNew: &hostOwner, + ChmodNew: &createDirPerms, + }) + if err != nil { + return nil, errors.Wrapf(err, "ensuring volume path %q", filepath.Join(mountPoint, volume)) + } + srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "evaluating path %q", srcPath) + } + stat, err := os.Stat(srcPath) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + // If we need to populate the mounted volume's contents with + // content from the rootfs, set it up now. + if initializeVolume { + if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil { + return nil, err + } + if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil { + return nil, err + } + logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) + if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) { + return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) + } + } + // Add the bind mount. + mounts = append(mounts, specs.Mount{ + Source: volumePath, + Destination: volume, + Type: "bind", + Options: []string{"bind"}, + }) + } + return mounts, nil +} + +func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]define.Secret, sshSources map[string]*sshagent.Source, runFileMounts []string, contextDir string, stageMountPoints map[string]internal.StageMountDetails) (*runMountArtifacts, error) { + // Start building a new list of mounts. + var mounts []specs.Mount + haveMount := func(destination string) bool { + for _, mount := range mounts { + if mount.Destination == destination { + // Already have something to mount there. + return true + } + } + return false + } + + ipc := namespaceOptions.Find(string(specs.IPCNamespace)) + hostIPC := ipc == nil || ipc.Host + net := namespaceOptions.Find(string(specs.NetworkNamespace)) + hostNetwork := net == nil || net.Host + user := namespaceOptions.Find(string(specs.UserNamespace)) + hostUser := (user == nil || user.Host) && !unshare.IsRootless() + + // Copy mounts from the generated list. + mountCgroups := true + specMounts := []specs.Mount{} + for _, specMount := range spec.Mounts { + // Override some of the mounts from the generated list if we're doing different things with namespaces. 
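setupMounts above accumulates its list through the haveMount guard, so the first mount claiming a destination wins and later candidates for the same path are skipped (the dedup loop appears further down in the function). A compact restatement of that guard:

package main

import (
	"fmt"

	"github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	var mounts []specs.Mount
	have := func(dest string) bool {
		for _, m := range mounts {
			if m.Destination == dest {
				return true // something already claimed this destination
			}
		}
		return false
	}
	for _, m := range []specs.Mount{
		{Destination: "/proc", Type: "proc", Source: "proc"},
		{Destination: "/proc", Type: "bind", Source: "/proc"}, // dropped by the guard
	} {
		if !have(m.Destination) {
			mounts = append(mounts, m)
		}
	}
	fmt.Println(len(mounts)) // 1
}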
+ if specMount.Destination == "/dev/shm" { + specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777"} + if shmSize != "" { + specMount.Options = append(specMount.Options, "size="+shmSize) + } + if hostIPC && !hostUser { + if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { + logrus.Debugf("/dev/shm is not present, not binding into container") + continue + } + specMount = specs.Mount{ + Source: "/dev/shm", + Type: "bind", + Destination: "/dev/shm", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + } + } + if specMount.Destination == "/dev/mqueue" { + if hostIPC && !hostUser { + if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { + logrus.Debugf("/dev/mqueue is not present, not binding into container") + continue + } + specMount = specs.Mount{ + Source: "/dev/mqueue", + Type: "bind", + Destination: "/dev/mqueue", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + } + } + if specMount.Destination == "/sys" { + if hostNetwork && !hostUser { + mountCgroups = false + if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { + logrus.Debugf("/sys is not present, not binding into container") + continue + } + specMount = specs.Mount{ + Source: "/sys", + Type: "bind", + Destination: "/sys", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, + } + } + } + specMounts = append(specMounts, specMount) + } + + // Add a mount for the cgroups filesystem, unless we're already + // recursively bind mounting all of /sys, in which case we shouldn't + // bother with it. + sysfsMount := []specs.Mount{} + if mountCgroups { + sysfsMount = []specs.Mount{{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, + }} + } + + // Get the list of files we need to bind into the container. + bindFileMounts := runSetupBoundFiles(bundlePath, bindFiles) + + // After this point we need to know the per-container persistent storage directory. + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID) + } + + // Figure out which UID and GID to tell the subscriptions package to use + // for files that it creates. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return nil, err + } + + // Get host UID and GID of the container process. + processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID) + if err != nil { + return nil, err + } + + // Get the list of subscriptions mounts. + subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) + + // Get the list of mounts that are just for this Run() call. + // TODO: acui: de-spaghettify run mounts + runMounts, mountArtifacts, err := b.runSetupRunMounts(context, runFileMounts, secrets, stageMountPoints, sshSources, cdir, contextDir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, int(rootUID), int(rootGID), int(processUID), int(processGID)) + if err != nil { + return nil, err + } + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. 
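The /dev/shm branch above rebuilds the tmpfs option list and appends a size= cap only when the builder carries a ShmSize. The same few lines in isolation; the size value is illustrative:

package main

import "fmt"

func main() {
	// Fresh tmpfs options for /dev/shm, with an optional size= cap
	// (e.g. from --shm-size) appended when one was configured.
	opts := []string{"nosuid", "noexec", "nodev", "mode=1777"}
	shmSize := "65536k" // hypothetical configured value
	if shmSize != "" {
		opts = append(opts, "size="+shmSize)
	}
	fmt.Println(opts)
}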
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID)) + if err != nil { + return nil, err + } + + // Get the list of explicitly-specified volume mounts. + volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID)) + if err != nil { + return nil, err + } + + // Prepare the list of mount destinations which can be cleaned up safely. + // we can clean bindFiles, subscriptionMounts and specMounts + // everything other than these might have user content + mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...) + + allMounts := util.SortMounts(append(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...), sysfsMount...)) + // Add them all, in the preferred order, except where they conflict with something that was previously added. + for _, mount := range allMounts { + if haveMount(mount.Destination) { + // Already mounting something there, no need to bother with this one. + continue + } + // Add the mount. + mounts = append(mounts, mount) + } + + // Set the list in the spec. + spec.Mounts = mounts + return mountArtifacts, nil +} + +// Destinations which can be cleaned up after every RUN +func cleanableDestinationListFromMounts(mounts []spec.Mount) []string { + mountDest := []string{} + for _, mount := range mounts { + // Add all destinations to mountArtifacts so that they can be cleaned up later + if mount.Destination != "" { + // we don't want to remove destinations with /etc, /dev, /sys, /proc as the rootfs already contains these files + // and unionfs will create `whiteout`, i.e. `.wh`, files on removal of overlapping files from these directories.
+ // everything other than these will be cleaned up + if !strings.HasPrefix(mount.Destination, "/etc") && !strings.HasPrefix(mount.Destination, "/dev") && !strings.HasPrefix(mount.Destination, "/sys") && !strings.HasPrefix(mount.Destination, "/proc") { + mountDest = append(mountDest, mount.Destination) + } + } + } + return mountDest +} + +// addResolvConf copies files from the host and sets them up to bind mount into the container +func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions define.NamespaceOptions) (string, error) { + resolvConf := "/etc/resolv.conf" + + stat, err := os.Stat(resolvConf) + if err != nil { + return "", err + } + contents, err := ioutil.ReadFile(resolvConf) + // resolv.conf doesn't have to exist + if err != nil && !os.IsNotExist(err) { + return "", err + } + + netns := false + ns := namespaceOptions.Find(string(spec.NetworkNamespace)) + if ns != nil && !ns.Host { + netns = true + } + + nameservers := resolvconf.GetNameservers(contents, types.IPv4) + // check if systemd-resolved is used; assume it is when 127.0.0.53 is the only nameserver + if len(nameservers) == 1 && nameservers[0] == "127.0.0.53" && netns { + // read the actual resolv.conf file for systemd-resolved + resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf") + if err != nil { + if !os.IsNotExist(err) { + return "", errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf") + } + } else { + contents = resolvedContents + } + } + + // Ensure that the container's /etc/resolv.conf is compatible with its + // network configuration. + if netns { + // FIXME handle IPv6 + resolve, err := resolvconf.FilterResolvDNS(contents, true) + if err != nil { + return "", errors.Wrapf(err, "error parsing host resolv.conf") + } + contents = resolve.Content + } + search := resolvconf.GetSearchDomains(contents) + nameservers = resolvconf.GetNameservers(contents, types.IP) + options := resolvconf.GetOptions(contents) + + defaultContainerConfig, err := config.Default() + if err != nil { + return "", errors.Wrapf(err, "failed to get container config") + } + dnsSearch = append(defaultContainerConfig.Containers.DNSSearches, dnsSearch...) + if len(dnsSearch) > 0 { + search = dnsSearch + } + + if b.Isolation == IsolationOCIRootless { + if ns != nil && !ns.Host && ns.Path == "" { + // if we are using slirp4netns, also add the built-in DNS server. + logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server") + nameservers = append([]string{"10.0.2.3"}, nameservers...) + } + } + + dnsServers = append(defaultContainerConfig.Containers.DNSServers, dnsServers...) + if len(dnsServers) != 0 { + dns, err := getDNSIP(dnsServers) + if err != nil { + return "", errors.Wrapf(err, "error getting dns servers") + } + nameservers = []string{} + for _, server := range dns { + nameservers = append(nameservers, server.String()) + } + } + + dnsOptions = append(defaultContainerConfig.Containers.DNSOptions, dnsOptions...)
+ if len(dnsOptions) != 0 { + options = dnsOptions + } + + cfile := filepath.Join(rdir, filepath.Base(resolvConf)) + if _, err = resolvconf.Build(cfile, nameservers, search, options); err != nil { + return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID) + } + + uid := int(stat.Sys().(*syscall.Stat_t).Uid) + gid := int(stat.Sys().(*syscall.Stat_t).Gid) + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(cfile, uid, gid); err != nil { + return "", err + } + + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", err + } + return cfile, nil +} + +// generateHosts creates a containers hosts file +func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) { + hostPath := "/etc/hosts" + stat, err := os.Stat(hostPath) + if err != nil { + return "", err + } + + hosts := bytes.NewBufferString("# Generated by Buildah\n") + orig, err := ioutil.ReadFile(hostPath) + if err != nil { + return "", err + } + hosts.Write(orig) + for _, host := range addHosts { + // verify the host format + values := strings.SplitN(host, ":", 2) + if len(values) != 2 { + return "", errors.Errorf("unable to parse host entry %q: incorrect format", host) + } + if values[0] == "" { + return "", errors.Errorf("hostname in host entry %q is empty", host) + } + if values[1] == "" { + return "", errors.Errorf("IP address in host entry %q is empty", host) + } + hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0]))) + } + hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s %s\n", b.Container, hostname))) + hosts.Write([]byte(fmt.Sprintf("::1 %s %s\n", b.Container, hostname))) + + if ip := util.LocalIP(); ip != "" { + hosts.Write([]byte(fmt.Sprintf("%s %s\n", ip, "host.containers.internal"))) + } + + cfile := filepath.Join(rdir, filepath.Base(hostPath)) + if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil { + return "", errors.Wrapf(err, "error writing /etc/hosts into the container") + } + uid := int(stat.Sys().(*syscall.Stat_t).Uid) + gid := int(stat.Sys().(*syscall.Stat_t).Gid) + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(cfile, uid, gid); err != nil { + return "", err + } + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", err + } + + return cfile, nil +} + +func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { + switch terminalPolicy { + case DefaultTerminal: + onTerminal := term.IsTerminal(unix.Stdin) && term.IsTerminal(unix.Stdout) && term.IsTerminal(unix.Stderr) + if onTerminal { + logrus.Debugf("stdio is a terminal, defaulting to using a terminal") + } else { + logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") + } + g.SetProcessTerminal(onTerminal) + case WithTerminal: + g.SetProcessTerminal(true) + case WithoutTerminal: + g.SetProcessTerminal(false) + } + if terminalSize != nil { + g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) + } +} + +func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, + containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) { + if options.Logger == nil { + options.Logger = logrus.StandardLogger() + } + + // Lock the caller to a single OS-level thread. 
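runUsingRuntime opens by pinning the goroutine, as the call below shows: the namespace- and terminal-related syscalls that follow are sensitive to which OS thread issues them. A minimal sketch; the Unlock here is only for symmetry, and the vendored code itself does not unlock:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Pin this goroutine to one OS thread for the duration of the
	// thread-sensitive work that follows.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	fmt.Println("goroutine pinned to its OS thread")
}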
+ runtime.LockOSThread() + + // Set up bind mounts for things that a namespaced user might not be able to get to directly. + unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) + if unmountAll != nil { + defer func() { + if err := unmountAll(); err != nil { + options.Logger.Error(err) + } + }() + } + if err != nil { + return 1, err + } + + // Write the runtime configuration. + specbytes, err := json.Marshal(spec) + if err != nil { + return 1, errors.Wrapf(err, "error encoding configuration %#v as json", spec) + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return 1, errors.Wrapf(err, "error storing runtime configuration") + } + + logrus.Debugf("config = %v", string(specbytes)) + + // Decide which runtime to use. + runtime := options.Runtime + if runtime == "" { + runtime = util.Runtime() + } + localRuntime := util.FindLocalRuntime(runtime) + if localRuntime != "" { + runtime = localRuntime + } + + // Default to just passing down our stdio. + getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, os.Stdout, os.Stderr + } + + // Figure out how we're doing stdio handling, and create pipes and sockets. + var stdio sync.WaitGroup + var consoleListener *net.UnixListener + var errorFds, closeBeforeReadingErrorFds []int + stdioPipe := make([][]int, 3) + copyConsole := false + copyPipes := false + finishCopy := make([]int, 2) + if err = unix.Pipe(finishCopy); err != nil { + return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") + } + finishedCopy := make(chan struct{}) + var pargs []string + if spec.Process != nil { + pargs = spec.Process.Args + if spec.Process.Terminal { + copyConsole = true + // Create a listening socket for accepting the container's terminal's PTY master. + socketPath := filepath.Join(bundlePath, "console.sock") + consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) + if err != nil { + return 1, errors.Wrapf(err, "error creating socket %q to receive terminal descriptor", consoleListener.Addr()) + } + // Add console socket arguments. + moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) + } else { + copyPipes = true + // Figure out who should own the pipes. + uid, gid, err := util.GetHostRootIDs(spec) + if err != nil { + return 1, err + } + // Create stdio pipes. + if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { + return 1, err + } + if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { + return 1, err + } + errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} + closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} + // Set stdio to our pipes. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") + stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") + stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") + return stdin, stdout, stderr + } + } + } else { + if options.Quiet { + // Discard stdout. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, nil, os.Stderr + } + } + } + + runtimeArgs := options.Args[:] + if options.CgroupManager == config.SystemdCgroupsManager { + runtimeArgs = append(runtimeArgs, "--systemd-cgroup") + } + + // Build the commands that we'll execute. 
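A few steps above, the generated spec is serialized and written with AtomicWriteFile so the OCI runtime can never observe a half-written config.json. The write pattern in isolation; bundlePath is hypothetical and must already exist:

package main

import (
	"encoding/json"
	"log"
	"path/filepath"

	"github.com/containers/storage/pkg/ioutils"
	"github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	bundlePath := "/tmp/bundle" // illustrative bundle directory
	spec := &specs.Spec{Version: specs.Version}
	b, err := json.Marshal(spec)
	if err != nil {
		log.Fatal(err)
	}
	// Write-to-temp-then-rename semantics: readers see the old file or
	// the complete new one, never a partial config.json.
	if err := ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), b, 0600); err != nil {
		log.Fatal(err)
	}
}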
+ pidFile := filepath.Join(bundlePath, "pid") + args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) + create := exec.Command(runtime, args...) + create.Dir = bundlePath + stdin, stdout, stderr := getCreateStdio() + create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr + if create.SysProcAttr == nil { + create.SysProcAttr = &syscall.SysProcAttr{} + } + + args = append(options.Args, "start", containerName) + start := exec.Command(runtime, args...) + start.Dir = bundlePath + start.Stderr = os.Stderr + + args = append(options.Args, "kill", containerName) + kill := exec.Command(runtime, args...) + kill.Dir = bundlePath + kill.Stderr = os.Stderr + + args = append(options.Args, "delete", containerName) + del := exec.Command(runtime, args...) + del.Dir = bundlePath + del.Stderr = os.Stderr + + // Actually create the container. + logrus.Debugf("Running %q", create.Args) + err = create.Run() + if err != nil { + return 1, errors.Wrapf(err, "error from %s creating container for %v: %s", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds)) + } + defer func() { + err2 := del.Run() + if err2 != nil { + if err == nil { + err = errors.Wrapf(err2, "error deleting container") + } else { + options.Logger.Infof("error from %s deleting container: %v", runtime, err2) + } + } + }() + + // Make sure we read the container's exit status when it exits. + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return 1, err + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) + } + var stopped uint32 + var reaping sync.WaitGroup + reaping.Add(1) + go func() { + defer reaping.Done() + var err error + _, err = unix.Wait4(pid, &wstatus, 0, nil) + if err != nil { + wstatus = 0 + options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err) + } + atomic.StoreUint32(&stopped, 1) + }() + + if configureNetwork { + if _, err := containerCreateW.Write([]byte{1}); err != nil { + return 1, err + } + containerCreateW.Close() + logrus.Debug("waiting for parent start message") + b := make([]byte, 1) + if _, err := containerStartR.Read(b); err != nil { + return 1, errors.Wrap(err, "did not get container start message from parent") + } + containerStartR.Close() + } + + if copyPipes { + // We don't need the ends of the pipes that belong to the container. + stdin.Close() + if stdout != nil { + stdout.Close() + } + stderr.Close() + } + + // Handle stdio for the container in the background. + stdio.Add(1) + go runCopyStdio(options.Logger, &stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) + + // Start the container. + logrus.Debugf("Running %q", start.Args) + err = start.Run() + if err != nil { + return 1, errors.Wrapf(err, "error from %s starting container", runtime) + } + defer func() { + if atomic.LoadUint32(&stopped) == 0 { + if err2 := kill.Run(); err2 != nil { + options.Logger.Infof("error from %s stopping container: %v", runtime, err2) + } + } + }() + + // Wait for the container to exit. + for { + now := time.Now() + var state specs.State + args = append(options.Args, "state", containerName) + stat := exec.Command(runtime, args...) 
+ stat.Dir = bundlePath + stat.Stderr = os.Stderr + stateOutput, err := stat.Output() + if err != nil { + if atomic.LoadUint32(&stopped) != 0 { + // container exited + break + } + return 1, errors.Wrapf(err, "error reading container state from %s (got output: %q)", runtime, string(stateOutput)) + } + if err = json.Unmarshal(stateOutput, &state); err != nil { + return 1, errors.Wrapf(err, "error parsing container state %q from %s", string(stateOutput), runtime) + } + switch state.Status { + case "running": + case "stopped": + atomic.StoreUint32(&stopped, 1) + default: + return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) + } + if atomic.LoadUint32(&stopped) != 0 { + break + } + select { + case <-finishedCopy: + atomic.StoreUint32(&stopped, 1) + case <-time.After(time.Until(now.Add(100 * time.Millisecond))): + continue + } + if atomic.LoadUint32(&stopped) != 0 { + break + } + } + + // Close the writing end of the stop-handling-stdio notification pipe. + unix.Close(finishCopy[1]) + // Wait for the stdio copy goroutine to flush. + stdio.Wait() + // Wait until we finish reading the exit status. + reaping.Wait() + + return wstatus, nil +} + +func runCollectOutput(logger *logrus.Logger, fds, closeBeforeReadingFds []int) string { //nolint:interfacer + for _, fd := range closeBeforeReadingFds { + unix.Close(fd) + } + var b bytes.Buffer + buf := make([]byte, 8192) + for _, fd := range fds { + nread, err := unix.Read(fd, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logger.Errorf("error reading from pipe %d: %v", fd, err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logger.Errorf("unable to wait for data from pipe %d: %v", fd, err) + } + continue + } + for nread > 0 { + r := buf[:nread] + if nwritten, err := b.Write(r); err != nil || nwritten != len(r) { + if nwritten != len(r) { + logger.Errorf("error buffering data from pipe %d: %v", fd, err) + break + } + } + nread, err = unix.Read(fd, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logger.Errorf("error reading from pipe %d: %v", fd, err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logger.Errorf("unable to wait for data from pipe %d: %v", fd, err) + } + break + } + } + } + return b.String() +} + +func setupRootlessNetwork(pid int) (teardown func(), err error) { + slirp4netns, err := exec.LookPath("slirp4netns") + if err != nil { + return nil, err + } + + rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe") + } + defer rootlessSlirpSyncR.Close() + + // Be sure there are no fds inherited to slirp4netns except the sync pipe + files, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return nil, errors.Wrapf(err, "cannot list open fds") + } + for _, f := range files { + fd, err := strconv.Atoi(f.Name()) + if err != nil { + return nil, errors.Wrapf(err, "cannot parse fd") + } + if fd == int(rootlessSlirpSyncW.Fd()) { + continue + } + unix.CloseOnExec(fd) + } + + cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0") + cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil + cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} + + err = cmd.Start() + rootlessSlirpSyncW.Close() + if err != nil { + return nil, errors.Wrapf(err, "cannot start slirp4netns") + } + + b := make([]byte, 1) + for { + if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * 
time.Second)); err != nil { + return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout") + } + if _, err := rootlessSlirpSyncR.Read(b); err == nil { + break + } else { + if os.IsTimeout(err) { + // Check if the process is still running. + var status syscall.WaitStatus + _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to read slirp4netns process status") + } + if status.Exited() || status.Signaled() { + return nil, errors.New("slirp4netns failed") + } + + continue + } + return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe") + } + } + + return func() { + cmd.Process.Kill() // nolint:errcheck + cmd.Wait() // nolint:errcheck + }, nil +} + +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), err error) { + if isolation == IsolationOCIRootless { + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { + return setupRootlessNetwork(pid) + } + } + + if len(configureNetworks) == 0 { + configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} + } + + // Make sure we can access the container's network namespace, + // even after it exits, to successfully tear down the + // interfaces. Ensure this by opening a handle to the network + // namespace, and using our copy to both configure and + // deconfigure it. + netns := fmt.Sprintf("/proc/%d/ns/net", pid) + netFD, err := unix.Open(netns, unix.O_RDONLY, 0) + if err != nil { + return nil, errors.Wrapf(err, "error opening network namespace") + } + mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) + + networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) + for i, network := range configureNetworks { + networks[network] = nettypes.PerNetworkOptions{ + InterfaceName: fmt.Sprintf("eth%d", i), + } + } + + opts := nettypes.NetworkOptions{ + ContainerID: containerName, + ContainerName: containerName, + Networks: networks, + } + _, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts}) + if err != nil { + return nil, err + } + + teardown = func() { + err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts}) + if err != nil { + options.Logger.Errorf("failed to cleanup network: %v", err) + } + } + + return teardown, nil +} + +func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer + mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0) + if err != nil { + return false, err + } + blocked := mask&unix.O_NONBLOCK == 0 + + if err := unix.SetNonblock(fd, nonblocking); err != nil { + if nonblocking { + logger.Errorf("error setting %s to nonblocking: %v", description, err) + } else { + logger.Errorf("error setting descriptor %s blocking: %v", description, err) + } + } + return blocked, err +} + +func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { + defer func() { + unix.Close(finishCopy[0]) + if copyPipes { + unix.Close(stdioPipe[unix.Stdin][1]) + unix.Close(stdioPipe[unix.Stdout][0]) + unix.Close(stdioPipe[unix.Stderr][0]) + } + stdio.Done() + finishedCopy <- struct{}{} + }() + // Map describing where data on an incoming descriptor should go. 
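setNonblock above captures the descriptor's previous blocking state before flipping O_NONBLOCK, so callers can restore it afterwards. A standalone sketch of that fcntl dance on stdin:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Read the current flags to learn whether the fd was blocking,
	// then switch it to non-blocking for polling.
	fd := unix.Stdin
	mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
	if err != nil {
		panic(err)
	}
	wasBlocking := mask&unix.O_NONBLOCK == 0
	if err := unix.SetNonblock(fd, true); err != nil {
		panic(err)
	}
	// Restore the original mode on the way out.
	defer unix.SetNonblock(fd, !wasBlocking)
	fmt.Println("stdin was blocking:", wasBlocking)
}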
+ relayMap := make(map[int]int) + // Map describing incoming and outgoing descriptors. + readDesc := make(map[int]string) + writeDesc := make(map[int]string) + // Buffers. + relayBuffer := make(map[int]*bytes.Buffer) + // Set up the terminal descriptor or pipes for polling. + if copyConsole { + // Accept a connection over our listening socket. + fd, err := runAcceptTerminal(logger, consoleListener, spec.Process.ConsoleSize) + if err != nil { + logger.Errorf("%v", err) + return + } + terminalFD := fd + // Input from our stdin, output from the terminal descriptor. + relayMap[unix.Stdin] = terminalFD + readDesc[unix.Stdin] = "stdin" + relayBuffer[terminalFD] = new(bytes.Buffer) + writeDesc[terminalFD] = "container terminal input" + relayMap[terminalFD] = unix.Stdout + readDesc[terminalFD] = "container terminal output" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "output" + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. + if term.IsTerminal(unix.Stdin) { + if state, err := term.MakeRaw(unix.Stdin); err != nil { + logger.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = term.Restore(unix.Stdin, state); err != nil { + logger.Errorf("unable to restore terminal state: %v", err) + } + }() + } + } + } + if copyPipes { + // Input from our stdin, output from the stdout and stderr pipes. + relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] + readDesc[unix.Stdin] = "stdin" + relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) + writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" + relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout + readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "stdout" + relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr + readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" + relayBuffer[unix.Stderr] = new(bytes.Buffer) + writeDesc[unix.Stderr] = "stderr" + } + // Set our reading descriptors to non-blocking. + for rfd, wfd := range relayMap { + blocked, err := setNonblock(logger, rfd, readDesc[rfd], true) + if err != nil { + return + } + if blocked { + defer setNonblock(logger, rfd, readDesc[rfd], false) // nolint:errcheck + } + setNonblock(logger, wfd, writeDesc[wfd], false) // nolint:errcheck + } + + if copyPipes { + setNonblock(logger, stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) // nolint:errcheck + } + + runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc) +} + +func canRetry(err error) bool { + if errno, isErrno := err.(syscall.Errno); isErrno { + return errno == syscall.EINTR || errno == syscall.EAGAIN + } + return false +} + +func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) { + closeStdin := false + + // Pass data back and forth. + pollTimeout := -1 + for len(relayMap) > 0 { + // Start building the list of descriptors to poll. + pollFds := make([]unix.PollFd, 0, len(relayMap)+1) + // Poll for a notification that we should stop handling stdio. + pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) + // Poll on our reading descriptors. 
+ for rfd := range relayMap { + pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) + } + buf := make([]byte, 8192) + // Wait for new data from any input descriptor, or a notification that we're done. + _, err := unix.Poll(pollFds, pollTimeout) + if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { + return + } + removes := make(map[int]struct{}) + for _, pollFd := range pollFds { + // If this descriptor's just been closed from the other end, mark it for + // removal from the set that we're checking for. + if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + removes[int(pollFd.Fd)] = struct{}{} + } + // If the descriptor was closed elsewhere, remove it from our list. + if pollFd.Revents&unix.POLLNVAL != 0 { + logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) + removes[int(pollFd.Fd)] = struct{}{} + } + // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. + if pollFd.Revents&unix.POLLIN == 0 { + continue + } + // Read whatever there is to be read. + readFD := int(pollFd.Fd) + writeFD, needToRelay := relayMap[readFD] + if needToRelay { + n, err := unix.Read(readFD, buf) + if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { + return + } + // If it's zero-length on our stdin and we're + // using pipes, it's an EOF, so close the stdin + // pipe's writing end. + if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin { + removes[int(pollFd.Fd)] = struct{}{} + } else if n > 0 { + // Buffer the data in case we get blocked on where they need to go. + nwritten, err := relayBuffer[writeFD].Write(buf[:n]) + if err != nil { + logrus.Debugf("buffer: %v", err) + continue + } + if nwritten != n { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) + continue + } + // If this is the last of the data we'll be able to read from this + // descriptor, read all that there is to read. + for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + nr, err := unix.Read(readFD, buf) + util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) + if nr <= 0 { + break + } + nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) + if err != nil { + logrus.Debugf("buffer: %v", err) + break + } + if nwritten != nr { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) + break + } + } + } + } + } + // Try to drain the output buffers. Set the default timeout + // for the next poll() to 100ms if we still have data to write. + pollTimeout = -1 + for writeFD := range relayBuffer { + if relayBuffer[writeFD].Len() > 0 { + n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) + if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { + return + } + if n > 0 { + relayBuffer[writeFD].Next(n) + } + if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { + logrus.Debugf("closing stdin") + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + } + if relayBuffer[writeFD].Len() > 0 { + pollTimeout = 100 + } + } + // Remove any descriptors which we don't need to poll any more from the poll descriptor list. 
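+		// When stdin is dropped and we are relaying via pipes, the write
+		// end of the container's stdin pipe is closed as soon as its
+		// buffer drains (see below).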
+ for remove := range removes { + if copyPipes && remove == unix.Stdin { + closeStdin = true + if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { + logrus.Debugf("closing stdin") + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + } + delete(relayMap, remove) + } + // If the we-can-return pipe had anything for us, we're done. + for _, pollFd := range pollFds { + if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { + // The pipe is closed, indicating that we can stop now. + return + } + } + } +} + +func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) { + defer consoleListener.Close() + c, err := consoleListener.AcceptUnix() + if err != nil { + return -1, errors.Wrapf(err, "error accepting socket descriptor connection") + } + defer c.Close() + // Expect a control message over our new connection. + b := make([]byte, 8192) + oob := make([]byte, 8192) + n, oobn, _, _, err := c.ReadMsgUnix(b, oob) + if err != nil { + return -1, errors.Wrapf(err, "error reading socket descriptor") + } + if n > 0 { + logrus.Debugf("socket descriptor is for %q", string(b[:n])) + } + if oobn > len(oob) { + return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn) + } + // Parse the control message. + scm, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message") + } + logrus.Debugf("control messages: %v", scm) + // Expect to get a descriptor. + terminalFD := -1 + for i := range scm { + fds, err := unix.ParseUnixRights(&scm[i]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i]) + } + logrus.Debugf("fds: %v", fds) + if len(fds) == 0 { + continue + } + terminalFD = fds[0] + break + } + if terminalFD == -1 { + return -1, errors.Errorf("unable to read terminal descriptor") + } + // Set the pseudoterminal's size to the configured size, or our own. + winsize := &unix.Winsize{} + if terminalSize != nil { + // Use configured sizes. + winsize.Row = uint16(terminalSize.Height) + winsize.Col = uint16(terminalSize.Width) + } else { + if term.IsTerminal(unix.Stdin) { + // Use the size of our terminal. + if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil { + logger.Warnf("error reading size of controlling terminal: %v", err) + winsize.Row = 0 + winsize.Col = 0 + } + } + } + if winsize.Row != 0 && winsize.Col != 0 { + if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil { + logger.Warnf("error setting size of container pseudoterminal: %v", err) + } + // FIXME - if we're connected to a terminal, we should + // be passing the updated terminal size down when we + // receive a SIGWINCH. + } + return terminalFD, nil +} + +// Create pipes to use for relaying stdio. 
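+// The container-facing pipe ends are chown'ed to the supplied uid and gid
+// so that the process in the container can read from and write to them.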
+func runMakeStdioPipe(uid, gid int) ([][]int, error) { + stdioPipe := make([][]int, 3) + for i := range stdioPipe { + stdioPipe[i] = make([]int, 2) + if err := unix.Pipe(stdioPipe[i]); err != nil { + return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i) + } + } + if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor") + } + if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor") + } + if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor") + } + return stdioPipe, nil +} + +func runUsingRuntimeMain() { + var options runUsingRuntimeSubprocOptions + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + } + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + // Set ourselves up to read the container's exit status. We're doing this in a child process + // so that we won't mess with the setting in a caller of the library. + if err := setChildProcess(); err != nil { + os.Exit(1) + } + ospec := options.Spec + if ospec == nil { + fmt.Fprintf(os.Stderr, "options spec not specified\n") + os.Exit(1) + } + + // open the pipes used to communicate with the parent process + var containerCreateW *os.File + var containerStartR *os.File + if options.ConfigureNetwork { + containerCreateW = os.NewFile(4, "containercreatepipe") + if containerCreateW == nil { + fmt.Fprintf(os.Stderr, "could not open fd 4\n") + os.Exit(1) + } + containerStartR = os.NewFile(5, "containerstartpipe") + if containerStartR == nil { + fmt.Fprintf(os.Stderr, "could not open fd 5\n") + os.Exit(1) + } + } + + // Run the container, start to finish. + status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR) + if err != nil { + fmt.Fprintf(os.Stderr, "error running container: %v\n", err) + os.Exit(1) + } + // Pass the container's exit status back to the caller by exiting with the same status. + if status.Exited() { + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) + os.Exit(1) + } + os.Exit(1) +} + +func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { + // Set namespace options in the container configuration. 
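+	// While walking the requested namespace options, we also note whether
+	// private user, network, and UTS namespaces were requested, so that the
+	// corresponding configure* return values can be reported to the caller.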
+ configureUserns := false + specifiedNetwork := false + for _, namespaceOption := range namespaceOptions { + switch namespaceOption.Name { + case string(specs.UserNamespace): + configureUserns = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUserns = true + } + case string(specs.NetworkNamespace): + specifiedNetwork = true + configureNetwork = false + if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { + if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { + configureNetworks = strings.Split(namespaceOption.Path, ",") + namespaceOption.Path = "" + } + configureNetwork = (policy != define.NetworkDisabled) + } + case string(specs.UTSNamespace): + configureUTS = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUTS = true + } + } + if namespaceOption.Host { + if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name) + } + } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { + if namespaceOption.Path == "" { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name) + } + return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path) + } + } + + // If we've got mappings, we're going to have to create a user namespace. + if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns { + if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace)) + } + hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") + if err != nil { + return false, nil, false, err + } + for _, m := range idmapOptions.UIDMap { + g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) + } + if len(idmapOptions.UIDMap) == 0 { + for _, m := range hostUidmap { + g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size) + } + } + for _, m := range idmapOptions.GIDMap { + g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size) + } + if len(idmapOptions.GIDMap) == 0 { + for _, m := range hostGidmap { + g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size) + } + } + if !specifiedNetwork { + if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace)) + } + configureNetwork = (policy != define.NetworkDisabled) + } + } else { + if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace)) + } + if !specifiedNetwork { + if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace)) + } + } + } + if configureNetwork && !unshare.IsRootless() { + for name, val := range define.DefaultNetworkSysctl { + // Check that the sysctl we are adding is actually supported + // by the kernel + p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1)) + _, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return false, nil, false, err + } + if err == nil { + 
g.AddLinuxSysctl(name, val)
+			} else {
+				logger.Warnf("ignoring sysctl %s since %s doesn't exist", name, p)
+			}
+		}
+	}
+	return configureNetwork, configureNetworks, configureUTS, nil
+}
+
+func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
+	defaultNamespaceOptions, err := DefaultNamespaceOptions()
+	if err != nil {
+		return false, nil, err
+	}
+
+	namespaceOptions := defaultNamespaceOptions
+	namespaceOptions.AddOrReplace(b.NamespaceOptions...)
+	namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+	networkPolicy := options.ConfigureNetwork
+	// Nothing was specified explicitly, so the network policy should be inherited from the builder.
+	if networkPolicy == NetworkDefault {
+		networkPolicy = b.ConfigureNetwork
+
+		// If the builder policy was NetworkDisabled and we want to disable
+		// the network for this run, reset options.ConfigureNetwork to
+		// NetworkDisabled, since it will be treated as the source of truth later.
+		if networkPolicy == NetworkDisabled {
+			options.ConfigureNetwork = networkPolicy
+		}
+	}
+
+	configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
+	if err != nil {
+		return false, nil, err
+	}
+
+	if configureUTS {
+		if options.Hostname != "" {
+			g.SetHostname(options.Hostname)
+		} else if b.Hostname() != "" {
+			g.SetHostname(b.Hostname())
+		} else {
+			g.SetHostname(stringid.TruncateID(b.ContainerID))
+		}
+	} else {
+		g.SetHostname("")
+	}
+
+	found := false
+	spec := g.Config
+	for i := range spec.Process.Env {
+		if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
+			found = true
+			break
+		}
+	}
+	if !found {
+		spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
+	}
+
+	return configureNetwork, configureNetworks, nil
+}
+
+func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {
+	for dest, src := range bindFiles {
+		options := []string{"rbind"}
+		if strings.HasPrefix(src, bundlePath) {
+			options = append(options, bind.NoBindOption)
+		}
+		mounts = append(mounts, specs.Mount{
+			Source:      src,
+			Destination: dest,
+			Type:        "bind",
+			Options:     options,
+		})
+	}
+	return mounts
+}
+
+func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error {
+	var (
+		ul  *units.Ulimit
+		err error
+	)
+
+	ulimit = append(defaultUlimits, ulimit...)
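+	// Each entry takes the name=SOFT:HARD form understood by
+	// units.ParseUlimit, e.g. "nofile=1024:2048"; the parsed name is
+	// upper-cased and prefixed with "RLIMIT_" below.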
+ for _, u := range ulimit { + if ul, err = units.ParseUlimit(u); err != nil { + return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) + } + + g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) + } + return nil +} + +func (b *Builder) cleanupTempVolumes() { + for tempVolume, val := range b.TempVolumes { + if val { + if err := overlay.RemoveTemp(tempVolume); err != nil { + b.Logger.Errorf(err.Error()) + } + b.TempVolumes[tempVolume] = false + } + } +} + +func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, rootUID, rootGID, processUID, processGID int) (mounts []specs.Mount, Err error) { + // Make sure the overlay directory is clean before running + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, errors.Wrapf(err, "error looking up container directory for %s", b.ContainerID) + } + if err := overlay.CleanupContent(containerDir); err != nil { + return nil, errors.Wrapf(err, "error cleaning up overlay content for %s", b.ContainerID) + } + + parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { + var foundrw, foundro, foundz, foundZ, foundO, foundU bool + var rootProp, upperDir, workDir string + for _, opt := range options { + switch opt { + case "rw": + foundrw = true + case "ro": + foundro = true + case "z": + foundz = true + case "Z": + foundZ = true + case "O": + foundO = true + case "U": + foundU = true + case "private", "rprivate", "slave", "rslave", "shared", "rshared": + rootProp = opt + } + + if strings.HasPrefix(opt, "upperdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + upperDir = splitOpt[1] + } + } + if strings.HasPrefix(opt, "workdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + workDir = splitOpt[1] + } + } + } + if !foundrw && !foundro { + options = append(options, "rw") + } + if foundz { + if err := label.Relabel(host, mountLabel, true); err != nil { + return specs.Mount{}, err + } + } + if foundZ { + if err := label.Relabel(host, mountLabel, false); err != nil { + return specs.Mount{}, err + } + } + if foundU { + if err := chown.ChangeHostPathOwnership(host, true, processUID, processGID); err != nil { + return specs.Mount{}, err + } + } + if foundO { + if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") { + return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa") + } + + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return specs.Mount{}, err + } + + contentDir, err := overlay.TempDir(containerDir, rootUID, rootGID) + if err != nil { + return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir) + } + + overlayOpts := overlay.Options{RootUID: rootUID, + RootGID: rootGID, + UpperDirOptionFragment: upperDir, + WorkDirOptionFragment: workDir, + GraphOpts: b.store.GraphOptions(), + } + + overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) + if err == nil { + b.TempVolumes[contentDir] = true + } + + // If chown true, add correct ownership to the overlay temp directories. 
+ if foundU { + if err := chown.ChangeHostPathOwnership(contentDir, true, processUID, processGID); err != nil { + return specs.Mount{}, err + } + } + + return overlayMount, err + } + if rootProp == "" { + options = append(options, "private") + } + if mountType != "tmpfs" { + mountType = "bind" + options = append(options, "rbind") + } + return specs.Mount{ + Destination: container, + Type: mountType, + Source: host, + Options: options, + }, nil + } + + // Bind mount volumes specified for this particular Run() invocation + for _, i := range optionMounts { + logrus.Debugf("setting up mounted volume at %q", i.Destination) + mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + // Bind mount volumes given by the user when the container was created + for _, i := range volumeMounts { + var options []string + spliti := parse.SplitStringWithColonEscape(i) + if len(spliti) > 2 { + options = strings.Split(spliti[2], ",") + } + options = append(options, "rbind") + mount, err := parseMount("bind", spliti[0], spliti[1], options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +func setupMaskedPaths(g *generate.Generator) { + for _, mp := range []string{ + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + "/sys/fs/selinux", + "/sys/dev", + } { + g.AddLinuxMaskedPaths(mp) + } +} + +func setupReadOnlyPaths(g *generate.Generator) { + for _, rp := range []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + } { + g.AddLinuxReadonlyPaths(rp) + } +} + +func setupCapAdd(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.AddProcessCapabilityBounding(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the bounding capability set", cap) + } + if err := g.AddProcessCapabilityEffective(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the effective capability set", cap) + } + if err := g.AddProcessCapabilityPermitted(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) + } + if err := g.AddProcessCapabilityAmbient(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the ambient capability set", cap) + } + } + return nil +} + +func setupCapDrop(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.DropProcessCapabilityBounding(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the bounding capability set", cap) + } + if err := g.DropProcessCapabilityEffective(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the effective capability set", cap) + } + if err := g.DropProcessCapabilityPermitted(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) + } + if err := g.DropProcessCapabilityAmbient(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the ambient capability set", cap) + } + } + return nil +} + +func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error { + g.ClearProcessCapabilities() + if err := setupCapAdd(g, defaultCapabilities...); err != nil { + return err + } + for _, c := range adds { + if strings.ToLower(c) == "all" { + adds = capabilities.AllCapabilities() + break + 
} + } + for _, c := range drops { + if strings.ToLower(c) == "all" { + g.ClearProcessCapabilities() + return nil + } + } + if err := setupCapAdd(g, adds...); err != nil { + return err + } + return setupCapDrop(g, drops...) +} + +// Search for a command that isn't given as an absolute path using the $PATH +// under the rootfs. We can't resolve absolute symbolic links without +// chroot()ing, which we may not be able to do, so just accept a link as a +// valid resolution. +func runLookupPath(g *generate.Generator, command []string) []string { + // Look for the configured $PATH. + spec := g.Config + envPath := "" + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "PATH=") { + envPath = spec.Process.Env[i] + } + } + // If there is no configured $PATH, supply one. + if envPath == "" { + defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" + envPath = "PATH=" + defaultPath + g.AddProcessEnv("PATH", defaultPath) + } + // No command, nothing to do. + if len(command) == 0 { + return command + } + // Command is already an absolute path, use it as-is. + if filepath.IsAbs(command[0]) { + return command + } + // For each element in the PATH, + for _, pathEntry := range filepath.SplitList(envPath[5:]) { + // if it's the empty string, it's ".", which is the Cwd, + if pathEntry == "" { + pathEntry = spec.Process.Cwd + } + // build the absolute path which it might be, + candidate := filepath.Join(pathEntry, command[0]) + // check if it's there, + if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil { + // and if it's not a directory, and either a symlink or executable, + if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) { + // use that. + return append([]string{candidate}, command[1:]...) + } + } + } + return command +} + +func getDNSIP(dnsServers []string) (dns []net.IP, err error) { + for _, i := range dnsServers { + result := net.ParseIP(i) + if result == nil { + return dns, errors.Errorf("invalid IP address %s", i) + } + dns = append(dns, result) + } + return dns, nil +} + +func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) { + // Set the user UID/GID/supplemental group list/capabilities lists. 
+ user, homeDir, err := b.userForRun(mountPoint, options.User) + if err != nil { + return "", err + } + if err := setupCapabilities(g, b.Capabilities, options.AddCapabilities, options.DropCapabilities); err != nil { + return "", err + } + g.SetProcessUID(user.UID) + g.SetProcessGID(user.GID) + for _, gid := range user.AdditionalGids { + g.AddProcessAdditionalGid(gid) + } + + // Remove capabilities if not running as root except Bounding set + if user.UID != 0 { + bounding := g.Config.Process.Capabilities.Bounding + g.ClearProcessCapabilities() + g.Config.Process.Capabilities.Bounding = bounding + } + + return homeDir, nil +} + +func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions, defaultEnv []string) { + g.ClearProcessEnv() + + if b.CommonBuildOpts.HTTPProxy { + for _, envSpec := range config.ProxyEnv { + if envVal, ok := os.LookupEnv(envSpec); ok { + g.AddProcessEnv(envSpec, envVal) + } + } + } + + for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) { + env := strings.SplitN(envSpec, "=", 2) + if len(env) > 1 { + g.AddProcessEnv(env[0], env[1]) + } + } +} + +func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string) error { + emptyDir := filepath.Join(bundleDir, "empty") + if err := os.Mkdir(emptyDir, 0); err != nil { + return err + } + + // If the container has a network namespace, we can create a fresh /sys mount + for _, ns := range spec.Linux.Namespaces { + if ns.Type == specs.NetworkNamespace { + return nil + } + } + + // Replace /sys with a read-only bind mount. + mounts := []specs.Mount{ + { + Source: "/dev", + Destination: "/dev", + Type: "tmpfs", + Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, + }, + { + Source: "mqueue", + Destination: "/dev/mqueue", + Type: "mqueue", + Options: []string{"private", "nodev", "noexec", "nosuid"}, + }, + { + Source: "pts", + Destination: "/dev/pts", + Type: "devpts", + Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, + }, + { + Source: "shm", + Destination: "/dev/shm", + Type: "tmpfs", + Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", fmt.Sprintf("size=%s", shmSize)}, + }, + { + Source: "/proc", + Destination: "/proc", + Type: "proc", + Options: []string{"private", "nodev", "noexec", "nosuid"}, + }, + { + Source: "/sys", + Destination: "/sys", + Type: "bind", + Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, + }, + } + + cgroup2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return err + } + if cgroup2 { + hasCgroupNs := false + for _, ns := range spec.Linux.Namespaces { + if ns.Type == specs.CgroupNamespace { + hasCgroupNs = true + break + } + } + if hasCgroupNs { + mounts = append(mounts, specs.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"private", "rw"}, + }) + } + } else { + spec.Linux.Resources = nil + // Cover up /sys/fs/cgroup, if it exist in our source for /sys. + if _, err := os.Stat("/sys/fs/cgroup"); err == nil { + spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") + } + } + // Keep anything that isn't under /dev, /proc, or /sys. 
+	for i := range spec.Mounts {
+		if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") ||
+			spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") ||
+			spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") {
+			continue
+		}
+		mounts = append(mounts, spec.Mounts[i])
+	}
+	spec.Mounts = mounts
+	return nil
+}
+
+func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+	var confwg sync.WaitGroup
+	config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
+		Options:          options,
+		Spec:             spec,
+		RootPath:         rootPath,
+		BundlePath:       bundlePath,
+		ConfigureNetwork: configureNetwork,
+		MoreCreateArgs:   moreCreateArgs,
+		ContainerName:    containerName,
+		Isolation:        isolation,
+	})
+	if conferr != nil {
+		return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
+	}
+	cmd := reexec.Command(runUsingRuntimeCommand)
+	cmd.Dir = bundlePath
+	cmd.Stdin = options.Stdin
+	if cmd.Stdin == nil {
+		cmd.Stdin = os.Stdin
+	}
+	cmd.Stdout = options.Stdout
+	if cmd.Stdout == nil {
+		cmd.Stdout = os.Stdout
+	}
+	cmd.Stderr = options.Stderr
+	if cmd.Stderr == nil {
+		cmd.Stderr = os.Stderr
+	}
+	cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())})
+	preader, pwriter, err := os.Pipe()
+	if err != nil {
+		return errors.Wrapf(err, "error creating configuration pipe")
+	}
+	confwg.Add(1)
+	go func() {
+		_, conferr = io.Copy(pwriter, bytes.NewReader(config))
+		if conferr != nil {
+			conferr = errors.Wrapf(conferr, "error while copying configuration down pipe to child process")
+		}
+		confwg.Done()
+	}()
+
+	// create network configuration pipes
+	var containerCreateR, containerCreateW *os.File
+	var containerStartR, containerStartW *os.File
+	if configureNetwork {
+		containerCreateR, containerCreateW, err = os.Pipe()
+		if err != nil {
+			return errors.Wrapf(err, "error creating container create pipe")
+		}
+		defer containerCreateR.Close()
+		defer containerCreateW.Close()
+
+		containerStartR, containerStartW, err = os.Pipe()
+		if err != nil {
+			return errors.Wrapf(err, "error creating container start pipe")
+		}
+		defer containerStartR.Close()
+		defer containerStartW.Close()
+		cmd.ExtraFiles = []*os.File{containerCreateW, containerStartR}
+	}
+
+	cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
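+	// Note: the reexec'ed child addresses these pipes by descriptor number:
+	// the configuration pipe is fd 3 and, when network configuration is
+	// requested, the container-create pipe is fd 4 and the container-start
+	// pipe is fd 5 (see runUsingRuntimeMain above).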
+ defer preader.Close() + defer pwriter.Close() + if err := cmd.Start(); err != nil { + return errors.Wrapf(err, "error while starting runtime") + } + + if configureNetwork { + if err := waitForSync(containerCreateR); err != nil { + // we do not want to return here since we want to capture the exit code from the child via cmd.Wait() + // close the pipes here so that the child will not hang forever + containerCreateR.Close() + containerStartW.Close() + logrus.Errorf("did not get container create message from subprocess: %v", err) + } else { + pidFile := filepath.Join(bundlePath, "pid") + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return err + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) + } + + teardown, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName) + if teardown != nil { + defer teardown() + } + if err != nil { + return err + } + + logrus.Debug("network namespace successfully setup, send start message to child") + _, err = containerStartW.Write([]byte{1}) + if err != nil { + return err + } + } + } + if err := cmd.Wait(); err != nil { + return errors.Wrapf(err, "error while running runtime") + } + confwg.Wait() + if err == nil { + return conferr + } + if conferr != nil { + logrus.Debugf("%v", conferr) + } + return err +} + +// waitForSync waits for a maximum of 5 seconds to read something from the file +func waitForSync(pipeR *os.File) error { + if err := pipeR.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + return err + } + b := make([]byte, 16) + _, err := pipeR.Read(b) + return err +} + +func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error { + switch isolation { + case IsolationOCIRootless: + if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of an IPC namespace.") + } + options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.IPCNamespace)}) + _, err := exec.LookPath("slirp4netns") + hostNetworking := err != nil + networkNamespacePath := "" + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil { + hostNetworking = ns.Host + networkNamespacePath = ns.Path + if hostNetworking { + networkNamespacePath = "" + } + } + options.NamespaceOptions.AddOrReplace(define.NamespaceOption{ + Name: string(specs.NetworkNamespace), + Host: hostNetworking, + Path: networkNamespacePath, + }) + if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of a PID namespace.") + } + options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.PIDNamespace), Host: false}) + if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of a user namespace.") + } + options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.UserNamespace)}) + case IsolationOCI: + pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) + userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) + if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) { + return errors.Errorf("not allowed to mix host PID namespace with container user namespace") + } + } + return nil +} + +// DefaultNamespaceOptions returns the default namespace settings from the +// runtime-tools generator library. 
+func DefaultNamespaceOptions() (define.NamespaceOptions, error) {
+	cfg, err := config.Default()
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get container config")
+	}
+	options := define.NamespaceOptions{
+		{Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"},
+		{Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"},
+		{Name: string(specs.MountNamespace), Host: true},
+		{Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host" || cfg.NetNS() == "container"},
+		{Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"},
+		{Name: string(specs.UserNamespace), Host: true},
+		{Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"},
+	}
+	g, err := generate.New("linux")
+	if err != nil {
+		return options, errors.Wrapf(err, "error generating new 'linux' runtime spec")
+	}
+	spec := g.Config
+	if spec.Linux != nil {
+		for _, ns := range spec.Linux.Namespaces {
+			options.AddOrReplace(define.NamespaceOption{
+				Name: string(ns.Type),
+				Path: ns.Path,
+			})
+		}
+	}
+	return options, nil
+}
+
+func contains(volumes []string, v string) bool {
+	for _, i := range volumes {
+		if i == v {
+			return true
+		}
+	}
+	return false
+}
+
+type runUsingRuntimeSubprocOptions struct {
+	Options          RunOptions
+	Spec             *specs.Spec
+	RootPath         string
+	BundlePath       string
+	ConfigureNetwork bool
+	MoreCreateArgs   []string
+	ContainerName    string
+	Isolation        define.Isolation
+}
+
+func init() {
+	reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
+}
+
+// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs.
+func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []string, secrets map[string]define.Secret, stageMountPoints map[string]internal.StageMountDetails, sshSources map[string]*sshagent.Source, containerWorkingDir string, contextDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, rootUID int, rootGID int, processUID int, processGID int) ([]spec.Mount, *runMountArtifacts, error) {
+	mountTargets := make([]string, 0, 10)
+	tmpFiles := make([]string, 0, len(mounts))
+	mountImages := make([]string, 0, 10)
+	finalMounts := make([]specs.Mount, 0, len(mounts))
+	agents := make([]*sshagent.AgentServer, 0, len(mounts))
+	sshCount := 0
+	defaultSSHSock := ""
+	tokens := []string{}
+	for _, mount := range mounts {
+		arr := strings.SplitN(mount, ",", 2)
+
+		kv := strings.Split(arr[0], "=")
+		if len(kv) != 2 || kv[0] != "type" {
+			return nil, nil, errors.New("invalid mount type")
+		}
+		if len(arr) == 2 {
+			tokens = strings.Split(arr[1], ",")
+		}
+		// Dispatch on the mount type; secret, ssh, bind, tmpfs, and cache mounts are supported.
+ switch kv[1] { + case "secret": + mount, envFile, err := getSecretMount(tokens, secrets, b.MountLabel, containerWorkingDir, uidmap, gidmap) + if err != nil { + return nil, nil, err + } + if mount != nil { + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + if envFile != "" { + tmpFiles = append(tmpFiles, envFile) + } + } + case "ssh": + mount, agent, err := b.getSSHMount(tokens, sshCount, sshSources, b.MountLabel, uidmap, gidmap, b.ProcessLabel) + if err != nil { + return nil, nil, err + } + if mount != nil { + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + agents = append(agents, agent) + if sshCount == 0 { + defaultSSHSock = mount.Destination + } + // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} + sshCount++ + } + case "bind": + mount, image, err := b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + // only perform cleanup if image was mounted ignore everything else + if image != "" { + mountImages = append(mountImages, image) + } + case "tmpfs": + mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + case "cache": + mount, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + default: + return nil, nil, errors.Errorf("invalid mount type %q", kv[1]) + } + } + artifacts := &runMountArtifacts{ + RunMountTargets: mountTargets, + TmpFiles: tmpFiles, + Agents: agents, + MountedImages: mountImages, + SSHAuthSock: defaultSSHSock, + } + return finalMounts, artifacts, nil +} + +func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, string, error) { + if contextDir == "" { + return nil, "", errors.New("Context Directory for current run invocation is not configured") + } + var optionMounts []specs.Mount + mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, image, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, image, err + } + return &volumes[0], image, nil +} + +func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) { + var optionMounts []specs.Mount + mount, err := internalParse.GetTmpfsMount(tokens) + if err != nil { + return nil, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, err + } + return &volumes[0], nil +} + +func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints 
map[string]internal.StageMountDetails) (*spec.Mount, error) {
+	var optionMounts []specs.Mount
+	mount, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints)
+	if err != nil {
+		return nil, err
+	}
+	optionMounts = append(optionMounts, mount)
+	volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID)
+	if err != nil {
+		return nil, err
+	}
+	return &volumes[0], nil
+}
+
+func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) {
+	errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint]")
+	if len(tokens) == 0 {
+		return nil, "", errInvalidSyntax
+	}
+	var err error
+	var id, target string
+	var required bool
+	var uid, gid uint32
+	var mode uint32 = 0400
+	for _, val := range tokens {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "id":
+			id = kv[1]
+		case "target", "dst", "destination":
+			target = kv[1]
+		case "required":
+			required, err = strconv.ParseBool(kv[1])
+			if err != nil {
+				return nil, "", errInvalidSyntax
+			}
+		case "mode":
+			mode64, err := strconv.ParseUint(kv[1], 8, 32)
+			if err != nil {
+				return nil, "", errInvalidSyntax
+			}
+			mode = uint32(mode64)
+		case "uid":
+			uid64, err := strconv.ParseUint(kv[1], 10, 32)
+			if err != nil {
+				return nil, "", errInvalidSyntax
+			}
+			uid = uint32(uid64)
+		case "gid":
+			gid64, err := strconv.ParseUint(kv[1], 10, 32)
+			if err != nil {
+				return nil, "", errInvalidSyntax
+			}
+			gid = uint32(gid64)
+		default:
+			return nil, "", errInvalidSyntax
+		}
+	}
+
+	if id == "" {
+		return nil, "", errInvalidSyntax
+	}
+	// The default location for secrets is /run/secrets/<id>.
+	if target == "" {
+		target = "/run/secrets/" + id
+	}
+
+	secr, ok := secrets[id]
+	if !ok {
+		if required {
+			return nil, "", errors.Errorf("secret required but no secret with id %s found", id)
+		}
+		return nil, "", nil
+	}
+	var data []byte
+	var envFile string
+	var ctrFileOnHost string
+
+	switch secr.SourceType {
+	case "env":
+		data = []byte(os.Getenv(secr.Source))
+		tmpFile, err := ioutil.TempFile("/dev/shm", "buildah*")
+		if err != nil {
+			return nil, "", err
+		}
+		envFile = tmpFile.Name()
+		ctrFileOnHost = tmpFile.Name()
+	case "file":
+		data, err = ioutil.ReadFile(secr.Source)
+		if err != nil {
+			return nil, "", err
+		}
+		ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
+		_, err = os.Stat(ctrFileOnHost)
+		if !os.IsNotExist(err) {
+			return nil, "", err
+		}
+	default:
+		return nil, "", errors.New("invalid source secret type")
+	}
+
+	// Copy the secret to the container working dir (or to a temporary file if it came
+	// from an environment variable), since we need to chmod, chown, and relabel it for
+	// the container user and we don't want to mess with the original file.
+	if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
+		return nil, "", err
+	}
+	if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil {
+		return nil, "", err
+	}
+
+	if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil {
+		return nil, "", err
+	}
+	hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid)
+	if err != nil {
+		return nil, "", err
+	}
+	if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil {
+		return nil, "", err
+	}
+	if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil {
+		return nil, "", err
+	}
+	newMount := specs.Mount{
+		Destination: target,
+		Type:        "bind",
+		Source:      ctrFileOnHost,
+		Options:     []string{"bind", "rprivate", "ro"},
+	}
+	return &newMount, envFile, nil
+}
+
+// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container.
+func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) {
+	errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint]")
+
+	var err error
+	var id, target string
+	var required bool
+	var uid, gid uint32
+	var mode uint32 = 0400
+	for _, val := range tokens {
+		kv := strings.SplitN(val, "=", 2)
+		if len(kv) < 2 {
+			return nil, nil, errInvalidSyntax
+		}
+		switch kv[0] {
+		case "id":
+			id = kv[1]
+		case "target", "dst", "destination":
+			target = kv[1]
+		case "required":
+			required, err = strconv.ParseBool(kv[1])
+			if err != nil {
+				return nil, nil, errInvalidSyntax
+			}
+		case "mode":
+			mode64, err := strconv.ParseUint(kv[1], 8, 32)
+			if err != nil {
+				return nil, nil, errInvalidSyntax
+			}
+			mode = uint32(mode64)
+		case "uid":
+			uid64, err := strconv.ParseUint(kv[1], 10, 32)
+			if err != nil {
+				return nil, nil, errInvalidSyntax
+			}
+			uid = uint32(uid64)
+		case "gid":
+			gid64, err := strconv.ParseUint(kv[1], 10, 32)
+			if err != nil {
+				return nil, nil, errInvalidSyntax
+			}
+			gid = uint32(gid64)
+		default:
+			return nil, nil, errInvalidSyntax
+		}
+	}
+
+	if id == "" {
+		id = "default"
+	}
+	// The default location for the ssh socket is /run/buildkit/ssh_agent.{i}.
+	if target == "" {
+		target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count)
+	}
+
+	sshsource, ok := sshsources[id]
+	if !ok {
+		if required {
+			return nil, nil, errors.Errorf("ssh required but no ssh with id %s found", id)
+		}
+		return nil, nil, nil
+	}
+	// Create a new agent from keys or socket.
+	fwdAgent, err := sshagent.NewAgentServer(sshsource)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Start the ssh agent server, and get the host socket we're mounting in the container.
+	hostSock, err := fwdAgent.Serve(processLabel)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := label.Relabel(filepath.Dir(hostSock), mountlabel, false); err != nil {
+		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+		}
+		return nil, nil, err
+	}
+	if err := label.Relabel(hostSock, mountlabel, false); err != nil {
+		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+		}
+		return nil, nil, err
+	}
+
+	hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid)
+	if err != nil {
+		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+		}
+		return nil, nil, err
+	}
+	if err := os.Lchown(hostSock, int(hostUID), int(hostGID)); err != nil {
+		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+		}
+		return nil, nil, err
+	}
+	if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil {
+		if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+			b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+		}
+		return nil, nil, err
+	}
+	newMount := specs.Mount{
+		Destination: target,
+		Type:        "bind",
+		Source:      hostSock,
+		Options:     []string{"bind", "rprivate", "ro"},
+	}
+	return &newMount, fwdAgent, nil
+}
+
+// cleanupRunMounts cleans up run mounts so they only appear in this run.
+func (b *Builder) cleanupRunMounts(context *imagetypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error {
+	for _, agent := range artifacts.Agents {
+		err := agent.Shutdown()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Clean up any images that were mounted for this run.
+	for _, image := range artifacts.MountedImages {
+		if image != "" {
+			// If we get here, some image was mounted for this run.
+			i, err := internalUtil.LookupImage(context, b.store, image)
+			if err != nil {
+				if errors.Cause(err) == storagetypes.ErrImageUnknown {
+					// The image is already unmounted; nothing to do.
+					continue
+				}
+				return err
+			}
+			// Silently try to unmount, and do nothing if the image
+			// is being used by something else.
+			_ = i.Unmount(false)
+		}
+	}
+
+	opts := copier.RemoveOptions{
+		All: true,
+	}
+	for _, path := range artifacts.RunMountTargets {
+		err := copier.Remove(mountpoint, path, opts)
+		if err != nil {
+			return err
+		}
+	}
+	var prevErr error
+	for _, path := range artifacts.TmpFiles {
+		err := os.Remove(path)
+		if !os.IsNotExist(err) {
+			if prevErr != nil {
+				logrus.Error(prevErr)
+			}
+			prevErr = err
+		}
+	}
+	return prevErr
+}
+
+// getNetworkInterface creates the network interface
+func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
+	conf, err := config.Default()
+	if err != nil {
+		return nil, err
+	}
+	// copy the config to not modify the default by accident
+	newconf := *conf
+	if len(cniConfDir) > 0 {
+		newconf.Network.NetworkConfigDir = cniConfDir
+	}
+	if len(cniPluginPath) > 0 {
+		plugins := strings.Split(cniPluginPath, string(os.PathListSeparator))
+		newconf.Network.CNIPluginDirs = plugins
+	}
+
+	_, netInt, err := network.NetworkBackend(store, &newconf, false)
+	if err != nil {
+		return nil, err
+	}
+	return netInt, nil
+}
diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go
new file mode 100644
index 00000000000..9e62691e854
--- /dev/null
+++ b/vendor/github.com/containers/buildah/run_unix.go
@@ -0,0 +1,31 @@
+// +build darwin
+
+package buildah
+
+import (
+	"github.com/containers/buildah/define"
+	nettypes "github.com/containers/common/libnetwork/types"
+	"github.com/containers/storage"
+	"github.com/pkg/errors"
+)
+
+// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
+type ContainerDevices define.ContainerDevices + +func setChildProcess() error { + return errors.New("function not supported on non-linux systems") +} + +func runUsingRuntimeMain() {} + +func (b *Builder) Run(command []string, options RunOptions) error { + return errors.New("function not supported on non-linux systems") +} +func DefaultNamespaceOptions() (NamespaceOptions, error) { + return NamespaceOptions{}, errors.New("function not supported on non-linux systems") +} + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go new file mode 100644 index 00000000000..f0640ffe2ab --- /dev/null +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -0,0 +1,27 @@ +// +build !linux,!darwin + +package buildah + +import ( + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" + "github.com/pkg/errors" +) + +func setChildProcess() error { + return errors.New("function not supported on non-linux systems") +} + +func runUsingRuntimeMain() {} + +func (b *Builder) Run(command []string, options RunOptions) error { + return errors.New("function not supported on non-linux systems") +} +func DefaultNamespaceOptions() (NamespaceOptions, error) { + return NamespaceOptions{}, errors.New("function not supported on non-linux systems") +} + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go new file mode 100644 index 00000000000..fc7811098d8 --- /dev/null +++ b/vendor/github.com/containers/buildah/seccomp.go @@ -0,0 +1,35 @@ +// +build seccomp,linux + +package buildah + +import ( + "io/ioutil" + + "github.com/containers/common/pkg/seccomp" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + switch seccompProfilePath { + case "unconfined": + spec.Linux.Seccomp = nil + case "": + seccompConfig, err := seccomp.GetDefaultProfile(spec) + if err != nil { + return errors.Wrapf(err, "loading default seccomp profile failed") + } + spec.Linux.Seccomp = seccompConfig + default: + seccompProfile, err := ioutil.ReadFile(seccompProfilePath) + if err != nil { + return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + } + seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) + if err != nil { + return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + } + spec.Linux.Seccomp = seccompConfig + } + return nil +} diff --git a/vendor/github.com/containers/buildah/seccomp_unsupported.go b/vendor/github.com/containers/buildah/seccomp_unsupported.go new file mode 100644 index 00000000000..cba8390c5a6 --- /dev/null +++ b/vendor/github.com/containers/buildah/seccomp_unsupported.go @@ -0,0 +1,15 @@ +// +build !seccomp !linux + +package buildah + +import ( + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setupSeccomp(spec *specs.Spec, 
seccompProfilePath string) error { + if spec.Linux != nil { + // runtime-tools may have supplied us with a default filter + spec.Linux.Seccomp = nil + } + return nil +} diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go new file mode 100644 index 00000000000..e7e9fd8c27e --- /dev/null +++ b/vendor/github.com/containers/buildah/selinux.go @@ -0,0 +1,41 @@ +// +build linux + +package buildah + +import ( + "fmt" + + "github.com/opencontainers/runtime-tools/generate" + selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +func selinuxGetEnabled() bool { + return selinux.GetEnabled() +} + +func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { + if processLabel != "" && selinux.GetEnabled() { + g.SetProcessSelinuxLabel(processLabel) + g.SetLinuxMountLabel(mountLabel) + } +} + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + if !selinuxGetEnabled() || processLabel == "" || mountLabel == "" { + // SELinux is completely disabled, or we're not doing anything at all with labeling + return nil + } + pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file") + if err != nil { + return errors.Wrapf(err, "computing file creation context for pipes") + } + for i := range stdioPipe { + pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0]) + if err := label.Relabel(pipeFdName, pipeContext, false); err != nil { + return errors.Wrapf(err, "setting file label on %q", pipeFdName) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/selinux_tag.sh b/vendor/github.com/containers/buildah/selinux_tag.sh new file mode 100644 index 00000000000..993630ad621 --- /dev/null +++ b/vendor/github.com/containers/buildah/selinux_tag.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +if pkg-config libselinux 2> /dev/null ; then + echo selinux +fi diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go new file mode 100644 index 00000000000..5b1948315a4 --- /dev/null +++ b/vendor/github.com/containers/buildah/selinux_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package buildah + +import ( + "github.com/opencontainers/runtime-tools/generate" +) + +func selinuxGetEnabled() bool { + return false +} + +func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { +} + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + return nil +} diff --git a/vendor/github.com/containers/buildah/troubleshooting.md b/vendor/github.com/containers/buildah/troubleshooting.md new file mode 100644 index 00000000000..1299b354f58 --- /dev/null +++ b/vendor/github.com/containers/buildah/troubleshooting.md @@ -0,0 +1,158 @@ +![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png) + +# Troubleshooting + +## A list of common issues and solutions for Buildah + +--- +### 1) No such image + +When doing a `buildah pull` or `buildah build` command and a "common" image can not be pulled, +it is likely that the `/etc/containers/registries.conf` file is either not installed or possibly +misconfigured. 
This issue might also indicate that other required files as listed in the
+[Configuration Files](https://github.com/containers/buildah/blob/main/install.md#configuration-files)
+section of the Installation Instructions are also not installed.
+
+#### Symptom
+```console
+$ sudo buildah build -f Dockerfile .
+STEP 1: FROM alpine
+error creating build container: 2 errors occurred:
+
+* Error determining manifest MIME type for docker://localhost/alpine:latest: pinging docker registry returned: Get https://localhost/v2/: dial tcp [::1]:443: connect: connection refused
+* Error determining manifest MIME type for docker://registry.access.redhat.com/alpine:latest: Error reading manifest latest in registry.access.redhat.com/alpine: unknown: Not Found
+error building: error creating build container: no such image "alpine" in registry: image not known
+```
+
+#### Solution
+
+ * Verify that the `/etc/containers/registries.conf` file exists. If not, verify that the containers-common package is installed.
+ * Verify that the entries in the `[registries.search]` section of the `/etc/containers/registries.conf` file are valid and reachable.
+ * Verify that the image you requested is either fully qualified, or that it exists on one of your search registries.
+ * Verify that the image is public or that you have logged in to at least one search registry which contains the private image.
+ * Verify that the other required [Configuration Files](https://github.com/containers/buildah/blob/main/install.md#configuration-files) are installed.
+
+---
+### 2) http: server gave HTTP response to HTTPS client
+
+When doing a Buildah command such as `build`, `commit`, `from`, or `push` to a registry,
+TLS verification is turned on by default. This error occurs when the registry
+only speaks plain HTTP.
+
+#### Symptom
+```console
+# buildah push alpine docker://localhost:5000/myalpine:latest
+Getting image source signatures
+Get https://localhost:5000/v2/: http: server gave HTTP response to HTTPS client
+```
+
+#### Solution
+
+By default, TLS verification is turned on when communicating with registries from
+Buildah. If the registry supports only HTTP, Buildah commands such as `build`,
+`commit`, `from` and `pull` will fail unless TLS verification is turned off
+using the `--tls-verify` option. **NOTE:** Communicating with a registry
+without TLS verification is not recommended.
+
+ * Turn off TLS verification by passing `--tls-verify=false`.
+ * E.g. `buildah push --tls-verify=false alpine docker://localhost:5000/myalpine:latest`
+
+---
+### 3) `buildah run` command fails with pipe or output redirection
+
+When doing a `buildah run` command while using a pipe ('|') or output redirection ('>>'),
+the command will fail, often with a `command not found` type of error.
+
+#### Symptom
+When executing a `buildah run` command with a pipe or output redirection such as the
+following commands:
+
+```console
+# buildah run $whalecontainer /usr/games/fortune -a | cowsay
+# buildah run $newcontainer echo "daemon off;" >> /etc/nginx/nginx.conf
+# buildah run $newcontainer echo "nginx on Fedora" > /usr/share/nginx/html/index.html
+```
+the `buildah run` command will not complete and an error will be raised.
+
+#### Solution
+There are two solutions to this problem. The
+[`podman run`](https://github.com/containers/podman/blob/main/docs/podman-run.1.md)
+command can be used in place of `buildah run`. Alternatively, to still use
+`buildah run`, surround the command with single quotes and use `bash -c`. The
+previous examples would be changed to:
+
+```console
+# buildah run $whalecontainer bash -c '/usr/games/fortune -a | cowsay'
+# buildah run $newcontainer bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf'
+# buildah run $newcontainer bash -c 'echo "nginx on Fedora" > /usr/share/nginx/html/index.html'
+```
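+
+Note: the workaround above assumes the image provides `bash`; that is a common
+case but not a guarantee. On minimal images that ship only `sh`, the same
+quoting trick works with `sh -c` (illustrative, not upstream guidance):
+
+```console
+# buildah run $newcontainer sh -c 'echo "daemon off;" >> /etc/nginx/nginx.conf'
+```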
+
+---
+### 4) `buildah push alpine oci:~/myalpine:latest` fails with lstat error
+
+When doing a `buildah push` command and the target image has a tilde (`~`) character
+in it, an lstat error will be raised stating there is no such file or directory.
+This is expected behavior for shell expansion of the tilde character as it is only
+expanded at the start of a word. This behavior is documented
+[here](https://www.gnu.org/software/libc/manual/html_node/Tilde-Expansion.html).
+
+#### Symptom
+```console
+$ sudo buildah pull alpine
+$ sudo buildah push alpine oci:~/myalpine:latest
+lstat /home/myusername/~: no such file or directory
+```
+
+#### Solution
+
+ * Replace `~` with `$HOME` or the fully specified directory `/home/myusername`.
+ * `$ sudo buildah push alpine oci:${HOME}/myalpine:latest`
+
+
+---
+### 5) Rootless buildah build fails EPERM on NFS:
+
+NFS enforces file creation on different UIDs on the server side and does not
+understand user namespaces, which rootless containers require. When a container
+root process like YUM attempts to create a file owned by a different UID, the
+NFS server denies the creation. NFS is also a problem for file locks when the
+storage is on it. Other distributed file systems (for example: Lustre, Spectrum
+Scale, the General Parallel File System (GPFS)) are also not supported when
+running in rootless mode, as these file systems do not understand user namespaces.
+
+#### Symptom
+```console
+$ buildah build .
+ERRO[0014] Error while applying layer: ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+error creating build container: Error committing the finished image: error adding layer with blob "sha256:a02a4930cb5d36f3290eb84f4bfa30668ef2e9fe3a1fb73ec015fc58b9958b17": ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+```
+
+#### Solution
+Choose one of the following:
+  * Set up containers/storage in a different directory, not on an NFS share (see the sketch below).
+  * Otherwise, run buildah as root, via `sudo buildah`.
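+
+As a sketch (the paths here are hypothetical, not upstream guidance), buildah's
+global `--root` and `--runroot` options can point containers/storage at local
+disk instead of the NFS-backed home directory:
+
+```console
+$ mkdir -p /var/tmp/$USER-containers /var/tmp/$USER-runroot
+$ buildah --root /var/tmp/$USER-containers --runroot /var/tmp/$USER-runroot build .
+```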
+
+---
+### 6) Rootless buildah build fails when using OverlayFS:
+
+The Overlay file system (OverlayFS) requires the ability to call the `mknod` command when creating whiteout files
+when extracting an image. However, a rootless user does not have the privileges to use `mknod` in this capacity.
+
+#### Symptom
+```console
+$ buildah build --storage-driver overlay .
+STEP 1: FROM docker.io/ubuntu:xenial
+Getting image source signatures
+Copying blob edf72af6d627 done
+Copying blob 3e4f86211d23 done
+Copying blob 8d3eac894db4 done
+Copying blob f7277927d38a done
+Copying config 5e13f8dd4c done
+Writing manifest to image destination
+Storing signatures
+Error: error creating build container: Error committing the finished image: error adding layer with blob "sha256:8d3eac894db4dc4154377ad28643dfe6625ff0e54bcfa63e0d04921f1a8ef7f8": Error processing tar file(exit status 1): operation not permitted
+```
+
+#### Solution
+Choose one of the following:
+  * Complete the build operation as a privileged user.
+  * Install and configure fuse-overlayfs.
+    * Install the fuse-overlayfs package for your Linux distribution.
+    * Add `mount_program = "/usr/bin/fuse-overlayfs"` under `[storage.options]` in your `~/.config/containers/storage.conf` file.
+---
diff --git a/vendor/github.com/containers/buildah/unmount.go b/vendor/github.com/containers/buildah/unmount.go
new file mode 100644
index 00000000000..b86ad92fdae
--- /dev/null
+++ b/vendor/github.com/containers/buildah/unmount.go
@@ -0,0 +1,19 @@
+package buildah
+
+import (
+	"github.com/pkg/errors"
+)
+
+// Unmount unmounts a build container.
+func (b *Builder) Unmount() error {
+	_, err := b.store.Unmount(b.ContainerID, false)
+	if err != nil {
+		return errors.Wrapf(err, "error unmounting build container %q", b.ContainerID)
+	}
+	b.MountPoint = ""
+	err = b.Save()
+	if err != nil {
+		return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
new file mode 100644
index 00000000000..9bfa9d268ea
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util.go
@@ -0,0 +1,227 @@
+package buildah
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/buildah/copier"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/pkg/sysregistriesv2"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/reexec"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	rspec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// InitReexec is a wrapper for reexec.Init(). It should be called at
+// the start of main(), and if it returns true, main() should return
+// immediately.
+func InitReexec() bool { + return reexec.Init() +} + +func copyStringStringMap(m map[string]string) map[string]string { + n := map[string]string{} + for k, v := range m { + n[k] = v + } + return n +} + +func copyStringSlice(s []string) []string { + t := make([]string, len(s)) + copy(t, s) + return t +} + +func copyHistory(history []v1.History) []v1.History { + if len(history) == 0 { + return nil + } + h := make([]v1.History, 0, len(history)) + for _, entry := range history { + created := entry.Created + if created != nil { + timestamp := *created + created = ×tamp + } + h = append(h, v1.History{ + Created: created, + CreatedBy: entry.CreatedBy, + Author: entry.Author, + Comment: entry.Comment, + EmptyLayer: entry.EmptyLayer, + }) + } + return h +} + +func convertStorageIDMaps(UIDMap, GIDMap []idtools.IDMap) ([]rspec.LinuxIDMapping, []rspec.LinuxIDMapping) { + uidmap := make([]rspec.LinuxIDMapping, 0, len(UIDMap)) + gidmap := make([]rspec.LinuxIDMapping, 0, len(GIDMap)) + for _, m := range UIDMap { + uidmap = append(uidmap, rspec.LinuxIDMapping{ + HostID: uint32(m.HostID), + ContainerID: uint32(m.ContainerID), + Size: uint32(m.Size), + }) + } + for _, m := range GIDMap { + gidmap = append(gidmap, rspec.LinuxIDMapping{ + HostID: uint32(m.HostID), + ContainerID: uint32(m.ContainerID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap +} + +func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMap, []idtools.IDMap) { + uidmap := make([]idtools.IDMap, 0, len(UIDMap)) + gidmap := make([]idtools.IDMap, 0, len(GIDMap)) + for _, m := range UIDMap { + uidmap = append(uidmap, idtools.IDMap{ + HostID: int(m.HostID), + ContainerID: int(m.ContainerID), + Size: int(m.Size), + }) + } + for _, m := range GIDMap { + gidmap = append(gidmap, idtools.IDMap{ + HostID: int(m.HostID), + ContainerID: int(m.ContainerID), + Size: int(m.Size), + }) + } + return uidmap, gidmap +} + +// isRegistryBlocked checks if the named registry is marked as blocked +func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) { + reginfo, err := sysregistriesv2.FindRegistry(sc, registry) + if err != nil { + return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistriesv2.ConfigPath(sc)) + } + if reginfo != nil { + if reginfo.Blocked { + logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistriesv2.ConfigPath(sc)) + } else { + logrus.Debugf("registry %q is not marked as blocked in registries configuration %q", registry, sysregistriesv2.ConfigPath(sc)) + } + return reginfo.Blocked, nil + } + logrus.Debugf("registry %q is not listed in registries configuration %q, assuming it's not blocked", registry, sysregistriesv2.ConfigPath(sc)) + return false, nil +} + +// isReferenceSomething checks if the registry part of a reference is insecure or blocked +func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, what func(string, *types.SystemContext) (bool, error)) (bool, error) { + if ref != nil { + if named := ref.DockerReference(); named != nil { + if domain := reference.Domain(named); domain != "" { + return what(domain, sc) + } + } + } + return false, nil +} + +// isReferenceBlocked checks if the registry part of a reference is blocked +func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool, error) { + if ref != nil && ref.Transport() != nil { + switch ref.Transport().Name() { + case "docker": + return isReferenceSomething(ref, sc, isRegistryBlocked) + } + } + 
return false, nil +} + +// ReserveSELinuxLabels reads containers storage and reserves SELinux contexts +// which are already being used by buildah containers. +func ReserveSELinuxLabels(store storage.Store, id string) error { + if selinuxGetEnabled() { + containers, err := store.Containers() + if err != nil { + return errors.Wrapf(err, "error getting list of containers") + } + + for _, c := range containers { + if id == c.ID { + continue + } else { + b, err := OpenBuilder(store, c.ID) + if err != nil { + if os.IsNotExist(errors.Cause(err)) { + // Ignore not exist errors since containers probably created by other tool + // TODO, we need to read other containers json data to reserve their SELinux labels + continue + } + return err + } + // Prevent different containers from using same MCS label + if err := label.ReserveLabel(b.ProcessLabel); err != nil { + return errors.Wrapf(err, "error reserving SELinux label %q", b.ProcessLabel) + } + } + } + } + return nil +} + +// IsContainer identifies if the specified container id is a buildah container +// in the specified store. +func IsContainer(id string, store storage.Store) (bool, error) { + cdir, err := store.ContainerDirectory(id) + if err != nil { + return false, err + } + // Assuming that if the stateFile exists, that this is a Buildah + // container. + if _, err = os.Stat(filepath.Join(cdir, stateFile)); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// Copy content from the directory "src" to the directory "dest", ensuring that +// content from outside of "root" (which is a parent of "src" or "src" itself) +// isn't read. +func extractWithTar(root, src, dest string) error { + var getErr, putErr error + var wg sync.WaitGroup + + pipeReader, pipeWriter := io.Pipe() + + wg.Add(1) + go func() { + getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter) + pipeWriter.Close() + wg.Done() + }() + wg.Add(1) + go func() { + putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader) + pipeReader.Close() + wg.Done() + }() + wg.Wait() + + if getErr != nil { + return errors.Wrapf(getErr, "error reading %q", src) + } + if putErr != nil { + return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go new file mode 100644 index 00000000000..12546dbd5ce --- /dev/null +++ b/vendor/github.com/containers/buildah/util/types.go @@ -0,0 +1,20 @@ +package util + +import ( + "github.com/containers/buildah/define" +) + +const ( + // DefaultRuntime if containers.conf fails. + DefaultRuntime = define.DefaultRuntime +) + +var ( + // DefaultCapabilities is the list of capabilities which we grant by + // default to containers which are running under UID 0. + DefaultCapabilities = define.DefaultCapabilities + + // DefaultNetworkSysctl is the list of Kernel parameters which we + // grant by default to containers which are running under UID 0. 
+	DefaultNetworkSysctl = define.DefaultNetworkSysctl
+)
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
new file mode 100644
index 00000000000..13c602c00de
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -0,0 +1,485 @@
+package util
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/containers/buildah/define"
+	"github.com/containers/common/libimage"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/pkg/shortnames"
+	"github.com/containers/image/v5/signature"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	minimumTruncatedIDLength = 3
+	// DefaultTransport is a prefix that we apply to an image name if we
+	// can't find one in the local Store, in order to generate a source
+	// reference for the image that we can then copy to the local Store.
+	DefaultTransport = "docker://"
+)
+
+var (
+	// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
+	// to prepend to image names that only contain a single path component.
+	RegistryDefaultPathPrefix = map[string]string{
+		"index.docker.io": "library",
+		"docker.io":       "library",
+	}
+)
+
+// resolveName checks if name is a valid image name, and if that name doesn't
+// include a domain portion, returns a list of the names which it might
+// correspond to in the set of configured registries, and the transport used to
+// pull the image.
+//
+// The returned image names never include a transport: prefix, and if transport != "",
+// (transport, image) should be a valid input to alltransports.ParseImageName.
+// transport == "" indicates an image that already exists in local storage,
+// and that the name is valid for store.Image() / storage.Transport.ParseStoreReference().
+//
+// NOTE: The "list of search registries is empty" check does not count blocked registries,
+// and neither the implied "localhost" nor a possible firstRegistry is counted.
+func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, error) {
+	if name == "" {
+		return nil, "", nil
+	}
+
+	// Maybe it's a truncated image ID. Don't prepend a registry name, then.
+	if len(name) >= minimumTruncatedIDLength {
+		if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
+			// It's a truncated version of the ID of an image that's present in local storage;
+			// we need only expand the ID.
+			return []string{img.ID}, "", nil
+		}
+	}
+	// If we're referring to an image by digest, it *must* be local and we
+	// should not have any fall through/back logic.
+	if strings.HasPrefix(name, "sha256:") {
+		d, err := digest.Parse(name)
+		if err != nil {
+			return nil, "", err
+		}
+		img, err := store.Image(d.Encoded())
+		if err != nil {
+			return nil, "", err
+		}
+		return []string{img.ID}, "", nil
+	}
+
+	// Transports are not supported for local image lookups.
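+	// (Illustrative note, not upstream text: a name such as "docker://alpine"
+	// parses successfully here and is returned with its "docker" transport,
+	// while a bare "alpine" fails to parse and falls through to the
+	// short-name resolution below.)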
+ srcRef, err := alltransports.ParseImageName(name) + if err == nil { + return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), nil + } + + var candidates []string + // Local short-name resolution. + namedCandidates, err := shortnames.ResolveLocally(sc, name) + if err != nil { + return nil, "", err + } + for _, named := range namedCandidates { + candidates = append(candidates, named.String()) + } + + return candidates, DefaultTransport, nil +} + +// ExpandNames takes unqualified names, parses them as image names, and returns +// the fully expanded result, including a tag. Names which don't include a registry +// name will be marked for the most-preferred registry (i.e., the first one in our +// configuration). +func ExpandNames(names []string, systemContext *types.SystemContext, store storage.Store) ([]string, error) { + expanded := make([]string, 0, len(names)) + for _, n := range names { + var name reference.Named + nameList, _, err := resolveName(n, systemContext, store) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", n) + } + if len(nameList) == 0 { + named, err := reference.ParseNormalizedNamed(n) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", n) + } + name = named + } else { + named, err := reference.ParseNormalizedNamed(nameList[0]) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", nameList[0]) + } + name = named + } + name = reference.TagNameOnly(name) + expanded = append(expanded, name.String()) + } + return expanded, nil +} + +// FindImage locates the locally-stored image which corresponds to a given name. +// Please note that the `firstRegistry` argument has been deprecated and has no +// effect anymore. +func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) { + runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) + if err != nil { + return nil, nil, err + } + + localImage, _, err := runtime.LookupImage(image, nil) + if err != nil { + return nil, nil, err + } + ref, err := localImage.StorageReference() + if err != nil { + return nil, nil, err + } + + return ref, localImage.StorageImage(), nil +} + +// resolveNameToReferences tries to create a list of possible references +// (including their transports) from the provided image name. +func ResolveNameToReferences( + store storage.Store, + systemContext *types.SystemContext, + image string, +) (refs []types.ImageReference, err error) { + names, transport, err := resolveName(image, systemContext, store) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", image) + } + + if transport != DefaultTransport { + transport += ":" + } + + for _, name := range names { + ref, err := alltransports.ParseImageName(transport + name) + if err != nil { + logrus.Debugf("error parsing reference to image %q: %v", name, err) + continue + } + refs = append(refs, ref) + } + if len(refs) == 0 { + return nil, errors.Errorf("error locating images with names %v", names) + } + return refs, nil +} + +// AddImageNames adds the specified names to the specified image. Please note +// that the `firstRegistry` argument has been deprecated and has no effect +// anymore. 
+func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error { + runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) + if err != nil { + return err + } + + localImage, _, err := runtime.LookupImage(image.ID, nil) + if err != nil { + return err + } + + for _, tag := range addNames { + if err := localImage.Tag(tag); err != nil { + return errors.Wrapf(err, "error tagging image %s", image.ID) + } + } + + return nil +} + +// GetFailureCause checks the type of the error "err" and returns a new +// error message that reflects the reason of the failure. +// In case err type is not a familiar one the error "defaultError" is returned. +func GetFailureCause(err, defaultError error) error { + switch nErr := errors.Cause(err).(type) { + case errcode.Errors: + return err + case errcode.Error, *url.Error: + return nErr + default: + return defaultError + } +} + +// WriteError writes `lastError` into `w` if not nil and return the next error `err` +func WriteError(w io.Writer, err error, lastError error) error { + if lastError != nil { + fmt.Fprintln(w, lastError) + } + return err +} + +// Runtime is the default command to use to run the container. +func Runtime() string { + runtime := os.Getenv("BUILDAH_RUNTIME") + if runtime != "" { + return runtime + } + + conf, err := config.Default() + if err != nil { + logrus.Warnf("Error loading container config when searching for local runtime: %v", err) + return define.DefaultRuntime + } + return conf.Engine.OCIRuntime +} + +// StringInSlice returns a boolean indicating if the exact value s is present +// in the slice slice. +func StringInSlice(s string, slice []string) bool { + for _, v := range slice { + if v == s { + return true + } + } + return false +} + +// GetContainerIDs uses ID mappings to compute the container-level IDs that will +// correspond to a UID/GID pair on the host. +func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) { + uidMapped := true + for _, m := range uidmap { + uidMapped = false + if uid >= m.HostID && uid < m.HostID+m.Size { + uid = (uid - m.HostID) + m.ContainerID + uidMapped = true + break + } + } + if !uidMapped { + return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) + } + gidMapped := true + for _, m := range gidmap { + gidMapped = false + if gid >= m.HostID && gid < m.HostID+m.Size { + gid = (gid - m.HostID) + m.ContainerID + gidMapped = true + break + } + } + if !gidMapped { + return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) + } + return uid, gid, nil +} + +// GetHostIDs uses ID mappings to compute the host-level IDs that will +// correspond to a UID/GID pair in the container. 
+func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
+	uidMapped := true
+	for _, m := range uidmap {
+		uidMapped = false
+		if uid >= m.ContainerID && uid < m.ContainerID+m.Size {
+			uid = (uid - m.ContainerID) + m.HostID
+			uidMapped = true
+			break
+		}
+	}
+	if !uidMapped {
+		return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
+	}
+	gidMapped := true
+	for _, m := range gidmap {
+		gidMapped = false
+		if gid >= m.ContainerID && gid < m.ContainerID+m.Size {
+			gid = (gid - m.ContainerID) + m.HostID
+			gidMapped = true
+			break
+		}
+	}
+	if !gidMapped {
+		return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
+	}
+	return uid, gid, nil
+}
+
+// GetHostRootIDs uses ID mappings in spec to compute the host-level IDs that will
+// correspond to UID/GID 0/0 in the container.
+func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) {
+	if spec == nil || spec.Linux == nil {
+		return 0, 0, nil
+	}
+	return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0)
+}
+
+// GetPolicyContext sets up, initializes, and returns a new context for the specified policy.
+func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
+	policy, err := signature.DefaultPolicy(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	policyContext, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		return nil, err
+	}
+	return policyContext, nil
+}
+
+// logIfNotErrno logs the error message unless err is either nil or one of the
+// listed syscall.Errno values. It returns true if it logged an error.
+func logIfNotErrno(err error, what string, ignores ...syscall.Errno) (logged bool) {
+	if err == nil {
+		return false
+	}
+	if errno, isErrno := err.(syscall.Errno); isErrno {
+		for _, ignore := range ignores {
+			if errno == ignore {
+				return false
+			}
+		}
+	}
+	logrus.Error(what)
+	return true
+}
+
+// LogIfNotRetryable logs "what" if err is set and is not an EINTR or EAGAIN
+// syscall.Errno. Returns "true" if we can continue.
+func LogIfNotRetryable(err error, what string) (retry bool) {
+	return !logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN)
+}
+
+// LogIfUnexpectedWhileDraining logs "what" if err is set and is not an EINTR
+// or EAGAIN or EIO syscall.Errno.
+func LogIfUnexpectedWhileDraining(err error, what string) {
+	logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN, syscall.EIO)
+}
+
+// TruncateString trims the given string to the provided maximum number of
+// characters and shortens it with `...`.
+func TruncateString(str string, to int) string {
+	newStr := str
+	if len(str) > to {
+		const tr = "..."
+		if to > len(tr) {
+			to -= len(tr)
+		}
+		newStr = str[0:to] + tr
+	}
+	return newStr
+}
+
+var (
+	isUnifiedOnce sync.Once
+	isUnified     bool
+	isUnifiedErr  error
+)
+
+// fileExistsAndNotADir checks whether a file exists
+// and is not a directory.
+func fileExistsAndNotADir(path string) bool {
+	file, err := os.Stat(path)
+
+	if file == nil || err != nil || os.IsNotExist(err) {
+		return false
+	}
+	return !file.IsDir()
+}
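+
+// Illustrative usage (an assumption, not upstream code): resolving the first
+// existing binary path configured for a runtime name in containers.conf:
+//
+//	path := FindLocalRuntime("crun") // e.g. "/usr/bin/crun", or "" if none found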
+// FindLocalRuntime finds the local runtime on the system by searching
+// through the config file's possible locations.
+func FindLocalRuntime(runtime string) string {
+	var localRuntime string
+	conf, err := config.Default()
+	if err != nil {
+		logrus.Debugf("Error loading container config when searching for local runtime.")
+		return localRuntime
+	}
+	for _, val := range conf.Engine.OCIRuntimes[runtime] {
+		if fileExistsAndNotADir(val) {
+			localRuntime = val
+			break
+		}
+	}
+	return localRuntime
+}
+
+// MergeEnv merges two lists of environment variables, avoiding duplicates.
+func MergeEnv(defaults, overrides []string) []string {
+	s := make([]string, 0, len(defaults)+len(overrides))
+	index := make(map[string]int)
+	for _, envSpec := range append(defaults, overrides...) {
+		envVar := strings.SplitN(envSpec, "=", 2)
+		if i, ok := index[envVar[0]]; ok {
+			s[i] = envSpec
+			continue
+		}
+		s = append(s, envSpec)
+		index[envVar[0]] = len(s) - 1
+	}
+	return s
+}
+
+type byDestination []specs.Mount
+
+func (m byDestination) Len() int {
+	return len(m)
+}
+
+func (m byDestination) Less(i, j int) bool {
+	return m.parts(i) < m.parts(j)
+}
+
+func (m byDestination) Swap(i, j int) {
+	m[i], m[j] = m[j], m[i]
+}
+
+func (m byDestination) parts(i int) int {
+	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
+}
+
+func SortMounts(m []specs.Mount) []specs.Mount {
+	sort.Sort(byDestination(m))
+	return m
+}
+
+func VerifyTagName(imageSpec string) (types.ImageReference, error) {
+	ref, err := alltransports.ParseImageName(imageSpec)
+	if err != nil {
+		if ref, err = alltransports.ParseImageName(DefaultTransport + imageSpec); err != nil {
+			return nil, err
+		}
+	}
+	return ref, nil
+}
+
+// LocalIP returns the non-loopback local IP of the host.
+func LocalIP() string {
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return ""
+	}
+	for _, address := range addrs {
+		// check the address type and, if it is not a loopback, use it
+		if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+			if ipnet.IP.To4() != nil {
+				return ipnet.IP.String()
+			}
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/containers/buildah/util/util_linux.go b/vendor/github.com/containers/buildah/util/util_linux.go
new file mode 100644
index 00000000000..cca1f9e7e1a
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util/util_linux.go
@@ -0,0 +1,20 @@
+package util
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
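+
+// (Illustrative note, not upstream text: on a cgroup v2 host, /sys/fs/cgroup is
+// itself a cgroup2 filesystem, so statfs on it reports unix.CGROUP2_SUPER_MAGIC;
+// on a cgroup v1 host it is typically a tmpfs holding per-controller mounts.)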
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 (unified) mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+	isUnifiedOnce.Do(func() {
+		var st syscall.Statfs_t
+		if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
+			isUnified, isUnifiedErr = false, err
+		} else {
+			isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil
+		}
+	})
+	return isUnified, isUnifiedErr
+}
diff --git a/vendor/github.com/containers/buildah/util/util_not_uint64.go b/vendor/github.com/containers/buildah/util/util_not_uint64.go
new file mode 100644
index 00000000000..a318fb3085f
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util/util_not_uint64.go
@@ -0,0 +1,14 @@
+// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le
+
+package util
+
+import (
+	"syscall"
+)
+
+func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode {
+	return hardlinkDeviceAndInode{
+		device: uint64(st.Dev),
+		inode:  uint64(st.Ino),
+	}
+}
diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/util/util_uint64.go
new file mode 100644
index 00000000000..b0b9225316f
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util/util_uint64.go
@@ -0,0 +1,14 @@
+// +build linux,!mips,!mipsle,!mips64,!mips64le
+
+package util
+
+import (
+	"syscall"
+)
+
+func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode {
+	return hardlinkDeviceAndInode{
+		device: st.Dev,
+		inode:  st.Ino,
+	}
+}
diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go
new file mode 100644
index 00000000000..29983e40fcf
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util/util_unix.go
@@ -0,0 +1,39 @@
+// +build linux darwin
+
+package util
+
+import (
+	"os"
+	"sync"
+	"syscall"
+)
+
+type hardlinkDeviceAndInode struct {
+	device, inode uint64
+}
+
+type HardlinkChecker struct {
+	hardlinks sync.Map
+}
+
+func (h *HardlinkChecker) Check(fi os.FileInfo) string {
+	if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
+		if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" {
+			return name.(string)
+		}
+	}
+	return ""
+}
+func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
+	if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
+		h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)
+	}
+}
+
+func UID(st os.FileInfo) int {
+	return int(st.Sys().(*syscall.Stat_t).Uid)
+}
+
+func GID(st os.FileInfo) int {
+	return int(st.Sys().(*syscall.Stat_t).Gid)
+}
diff --git a/vendor/github.com/containers/buildah/util/util_unsupported.go b/vendor/github.com/containers/buildah/util/util_unsupported.go
new file mode 100644
index 00000000000..05a68f60bf2
--- /dev/null
+++ b/vendor/github.com/containers/buildah/util/util_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux
+
+package util
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 (unified) mode.
+func IsCgroup2UnifiedMode() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/buildah/util/util_windows.go b/vendor/github.com/containers/buildah/util/util_windows.go new file mode 100644 index 00000000000..18965ab1796 --- /dev/null +++ b/vendor/github.com/containers/buildah/util/util_windows.go @@ -0,0 +1,24 @@ +// +build !linux,!darwin + +package util + +import ( + "os" +) + +type HardlinkChecker struct { +} + +func (h *HardlinkChecker) Check(fi os.FileInfo) string { + return "" +} +func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { +} + +func UID(st os.FileInfo) int { + return 0 +} + +func GID(st os.FileInfo) int { + return 0 +} diff --git a/vendor/github.com/containers/common/LICENSE b/vendor/github.com/containers/common/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/containers/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
new file mode 100644
index 00000000000..2a8f47f7f51
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -0,0 +1,441 @@
+package libimage
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/containers/common/libimage/manifests"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/common/pkg/retry"
+	"github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/signature"
+	storageTransport "github.com/containers/image/v5/storage"
+	"github.com/containers/image/v5/types"
+	encconfig "github.com/containers/ocicrypt/config"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	defaultMaxRetries = 3
+	defaultRetryDelay = time.Second
+)
+
+// LookupReferenceFunc returns an image reference based on the specified one.
+// The returned reference can return custom ImageSource or ImageDestination
+// objects which intercept or filter blobs, manifests, and signatures as
+// they are read and written.
+type LookupReferenceFunc = manifests.LookupReferenceFunc
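+
+// Illustrative sketch (an assumption, not upstream code): callers typically set
+// only the CopyOptions fields they need, e.g. credentials plus TLS behavior:
+//
+//	opts := &CopyOptions{
+//		Username:              "user", // hypothetical credentials
+//		Password:              "secret",
+//		InsecureSkipTLSVerify: types.OptionalBoolFalse,
+//	}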
+// CopyOptions allow for customizing image-copy operations.
+type CopyOptions struct {
+	// If set, will be used for copying the image. Fields below may
+	// override certain settings.
+	SystemContext *types.SystemContext
+	// Allows for customizing the source reference lookup. This can be
+	// used to use custom blob caches.
+	SourceLookupReferenceFunc LookupReferenceFunc
+	// Allows for customizing the destination reference lookup. This can
+	// be used to use custom blob caches.
+	DestinationLookupReferenceFunc LookupReferenceFunc
+	// CompressionFormat is the format to use for the compression of the blobs
+	CompressionFormat *compression.Algorithm
+	// CompressionLevel specifies what compression level is used
+	CompressionLevel *int
+
+	// containers-auth.json(5) file to use when authenticating against
+	// container registries.
+	AuthFilePath string
+	// Custom path to a blob-info cache.
+	BlobInfoCacheDirPath string
+	// Path to the certificates directory.
+	CertDirPath string
+	// Force layer compression when copying to a `dir` transport destination.
+	DirForceCompress bool
+	// Allow contacting registries over HTTP, or HTTPS with failed TLS
+	// verification. Note that this does not affect other TLS connections.
+	InsecureSkipTLSVerify types.OptionalBool
+	// Maximum number of retries with exponential backoff when facing
+	// transient network errors. A reasonable default is used if not set.
+	// Default 3.
+	MaxRetries *uint
+	// RetryDelay used for the exponential back off of MaxRetries.
+	// Default 1 time.Second.
+	RetryDelay *time.Duration
+	// ManifestMIMEType is the desired media type the image will be
+	// converted to if needed. Note that it must contain the exact MIME
+	// types. Short forms (e.g., oci, v2s2) used by some tools are not
+	// supported.
+	ManifestMIMEType string
+	// Accept uncompressed layers when copying OCI images.
+	OciAcceptUncompressedLayers bool
+	// If OciEncryptConfig is non-nil, it indicates that an image should be
+	// encrypted. The encryption options are derived from the construction
+	// of EncryptConfig object. Note: During the initial encryption process of
+	// a layer, the resultant digest is not known during creation, so
+	// newDigestingReader has to be set with validateDigest = false
+	OciEncryptConfig *encconfig.EncryptConfig
+	// OciEncryptLayers represents the list of layers to encrypt. If nil,
+	// don't encrypt any layers. If non-nil and len==0, denotes encrypt
+	// all layers. Integers in the slice represent 0-indexed layer
+	// indices, with support for negative indexing, i.e. 0 is the first
+	// layer, -1 is the last (top-most) layer.
+	OciEncryptLayers *[]int
+	// OciDecryptConfig contains the config that can be used to decrypt an
+	// image if it is encrypted, if non-nil. If nil, it does not attempt to
+	// decrypt an image.
+	OciDecryptConfig *encconfig.DecryptConfig
+	// Reported to when ProgressInterval has arrived for a single
+	// artifact+offset.
+	Progress chan types.ProgressProperties
+	// If set, allow using the storage transport even if it's disabled by
+	// the specified SignaturePolicyPath.
+	PolicyAllowStorage bool
+	// SignaturePolicyPath to overwrite the default one.
+	SignaturePolicyPath string
+	// If non-empty, asks for a signature to be added during the copy, and
+	// specifies a key ID.
+	SignBy string
+	// Remove any pre-existing signatures. SignBy will still add a new
+	// signature.
+	RemoveSignatures bool
+	// Writer is used to display copy information including progress bars.
+	Writer io.Writer
+
+	// ----- platform -----------------------------------------------------
+
+	// Architecture to use for choosing images.
+	Architecture string
+	// OS to use for choosing images.
+	OS string
+	// Variant to use when choosing images.
+	Variant string
+
+	// ----- credentials --------------------------------------------------
+
+	// Username to use when authenticating at a container registry.
+	Username string
+	// Password to use when authenticating at a container registry.
+	Password string
+	// Credentials is an alternative way to specify credentials in format
+	// "username[:password]". Cannot be used in combination with
+	// Username/Password.
+	Credentials string
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// ----- internal -----------------------------------------------------
+
+	// Additional tags when creating or copying a docker-archive.
+ dockerArchiveAdditionalTags []reference.NamedTagged +} + +// copier is an internal helper to conveniently copy images. +type copier struct { + imageCopyOptions copy.Options + retryOptions retry.RetryOptions + systemContext *types.SystemContext + policyContext *signature.PolicyContext + + sourceLookup LookupReferenceFunc + destinationLookup LookupReferenceFunc +} + +var ( + // storageAllowedPolicyScopes overrides the policy for local storage + // to ensure that we can read images from it. + storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, + } +) + +// getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns +// nil if no credentials are set. +func (options *CopyOptions) getDockerAuthConfig() (*types.DockerAuthConfig, error) { + authConf := &types.DockerAuthConfig{IdentityToken: options.IdentityToken} + + if options.Username != "" { + if options.Credentials != "" { + return nil, errors.New("username/password cannot be used with credentials") + } + authConf.Username = options.Username + authConf.Password = options.Password + return authConf, nil + } + + if options.Credentials != "" { + split := strings.SplitN(options.Credentials, ":", 2) + switch len(split) { + case 1: + authConf.Username = split[0] + default: + authConf.Username = split[0] + authConf.Password = split[1] + } + return authConf, nil + } + + // We should return nil unless a token was set. That's especially + // useful for Podman's remote API. + if options.IdentityToken != "" { + return authConf, nil + } + + return nil, nil +} + +// newCopier creates a copier. Note that fields in options *may* overwrite the +// counterparts of the specified system context. Please make sure to call +// `(*copier).close()`. 
+func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { + c := copier{} + c.systemContext = r.systemContextCopy() + + if options.SourceLookupReferenceFunc != nil { + c.sourceLookup = options.SourceLookupReferenceFunc + } + + if options.DestinationLookupReferenceFunc != nil { + c.destinationLookup = options.DestinationLookupReferenceFunc + } + + if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined { + c.systemContext.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify + c.systemContext.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue + c.systemContext.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue + } + + c.systemContext.DirForceCompress = c.systemContext.DirForceCompress || options.DirForceCompress + + if options.AuthFilePath != "" { + c.systemContext.AuthFilePath = options.AuthFilePath + } + + c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags + + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) + + if options.SignaturePolicyPath != "" { + c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath + } + + dockerAuthConfig, err := options.getDockerAuthConfig() + if err != nil { + return nil, err + } + if dockerAuthConfig != nil { + c.systemContext.DockerAuthConfig = dockerAuthConfig + } + + if options.BlobInfoCacheDirPath != "" { + c.systemContext.BlobInfoCacheDir = options.BlobInfoCacheDirPath + } + + if options.CertDirPath != "" { + c.systemContext.DockerCertPath = options.CertDirPath + } + + if options.CompressionFormat != nil { + c.systemContext.CompressionFormat = options.CompressionFormat + } + + if options.CompressionLevel != nil { + c.systemContext.CompressionLevel = options.CompressionLevel + } + + // NOTE: for the sake of consistency it's called Oci* in the CopyOptions. + c.systemContext.OCIAcceptUncompressedLayers = options.OciAcceptUncompressedLayers + + policy, err := signature.DefaultPolicy(c.systemContext) + if err != nil { + return nil, err + } + + // Buildah compatibility: even if the policy denies _all_ transports, + // Buildah still wants the storage to be accessible. 
+ if options.PolicyAllowStorage { + policy.Transports[storageTransport.Transport.Name()] = storageAllowedPolicyScopes + } + + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return nil, err + } + + c.policyContext = policyContext + + c.retryOptions.MaxRetry = defaultMaxRetries + if options.MaxRetries != nil { + c.retryOptions.MaxRetry = int(*options.MaxRetries) + } + c.retryOptions.Delay = defaultRetryDelay + if options.RetryDelay != nil { + c.retryOptions.Delay = *options.RetryDelay + } + + c.imageCopyOptions.Progress = options.Progress + if c.imageCopyOptions.Progress != nil { + c.imageCopyOptions.ProgressInterval = time.Second + } + + c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType + c.imageCopyOptions.SourceCtx = c.systemContext + c.imageCopyOptions.DestinationCtx = c.systemContext + c.imageCopyOptions.OciEncryptConfig = options.OciEncryptConfig + c.imageCopyOptions.OciEncryptLayers = options.OciEncryptLayers + c.imageCopyOptions.OciDecryptConfig = options.OciDecryptConfig + c.imageCopyOptions.RemoveSignatures = options.RemoveSignatures + c.imageCopyOptions.SignBy = options.SignBy + c.imageCopyOptions.ReportWriter = options.Writer + + defaultContainerConfig, err := config.Default() + if err != nil { + logrus.Warnf("Failed to get container config for copy options: %v", err) + } else { + c.imageCopyOptions.MaxParallelDownloads = defaultContainerConfig.Engine.ImageParallelCopies + } + + return &c, nil +} + +// close open resources. +func (c *copier) close() error { + return c.policyContext.Destroy() +} + +// copy the source to the destination. Returns the bytes of the copied +// manifest which may be used for digest computation. +func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) { + logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport()) + + var err error + + if c.sourceLookup != nil { + source, err = c.sourceLookup(source) + if err != nil { + return nil, err + } + } + + if c.destinationLookup != nil { + destination, err = c.destinationLookup(destination) + if err != nil { + return nil, err + } + } + + // Buildah compat: used when running in OpenShift. + sourceInsecure, err := checkRegistrySourcesAllows(source) + if err != nil { + return nil, err + } + destinationInsecure, err := checkRegistrySourcesAllows(destination) + if err != nil { + return nil, err + } + + // Sanity checks for Buildah. 
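+	// (Illustrative note, not upstream text: a non-nil *sourceInsecure or
+	// *destinationInsecure below comes from BUILD_REGISTRY_SOURCES; requiring
+	// TLS verification for a registry that policy marks insecure is
+	// contradictory, hence the errors.)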
+	if sourceInsecure != nil && *sourceInsecure {
+		if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+			return nil, errors.Errorf("can't require tls verification on an insecure registry")
+		}
+	}
+	if destinationInsecure != nil && *destinationInsecure {
+		if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+			return nil, errors.Errorf("can't require tls verification on an insecure registry")
+		}
+	}
+
+	var returnManifest []byte
+	f := func() error {
+		opts := c.imageCopyOptions
+		if sourceInsecure != nil {
+			value := types.NewOptionalBool(*sourceInsecure)
+			opts.SourceCtx.DockerInsecureSkipTLSVerify = value
+		}
+		if destinationInsecure != nil {
+			value := types.NewOptionalBool(*destinationInsecure)
+			opts.DestinationCtx.DockerInsecureSkipTLSVerify = value
+		}
+
+		copiedManifest, err := copy.Image(ctx, c.policyContext, destination, source, &opts)
+		if err == nil {
+			returnManifest = copiedManifest
+		}
+		return err
+	}
+	return returnManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
+}
+
+// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
+// variable, if it's set. The contents are expected to be a JSON-encoded
+// github.com/openshift/api/config/v1.Image, set by an OpenShift build
+// controller that arranged for us to be run in a container.
+//
+// If set, the insecure return value indicates whether the registry is set to
+// be insecure.
+//
+// NOTE: this functionality is required by Buildah for OpenShift.
+func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err error) {
+	registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
+	if !ok || registrySources == "" {
+		return nil, nil
+	}
+
+	logrus.Debugf("BUILD_REGISTRY_SOURCES set %q", registrySources)
+
+	dref := dest.DockerReference()
+	if dref == nil || reference.Domain(dref) == "" {
+		return nil, nil
+	}
+
+	// Use local struct instead of github.com/openshift/api/config/v1 RegistrySources
+	var sources struct {
+		InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+		BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
+		AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
+	}
+	if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
+		return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
+	}
+	blocked := false
+	if len(sources.BlockedRegistries) > 0 {
+		for _, blockedDomain := range sources.BlockedRegistries {
+			if blockedDomain == reference.Domain(dref) {
+				blocked = true
+			}
+		}
+	}
+	if blocked {
+		return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources)
+	}
+	allowed := true
+	if len(sources.AllowedRegistries) > 0 {
+		allowed = false
+		for _, allowedDomain := range sources.AllowedRegistries {
+			if allowedDomain == reference.Domain(dref) {
+				allowed = true
+			}
+		}
+	}
+	if !allowed {
+		return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources)
+	}
+
+	for _, insecureDomain := range sources.InsecureRegistries {
+		if insecureDomain == reference.Domain(dref) {
+			insecure := true
+			return &insecure, nil
+		}
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go
new file mode 100644
index 00000000000..2cde098468f
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/disk_usage.go
@@ -0,0 +1,130 @@
+package libimage
+
+import (
+	"context"
+	"time"
+)
+
+// ImageDiskUsage reports the total size of an image, that is, the size it
+// shares with other images plus the size unique to it.
+type ImageDiskUsage struct {
+	// Number of containers using the image.
+	Containers int
+	// ID of the image.
+	ID string
+	// Repository of the image.
+	Repository string
+	// Tag of the image.
+	Tag string
+	// Created time stamp.
+	Created time.Time
+	// The amount of space that an image shares with another one (i.e. their common data).
+	SharedSize int64
+	// The amount of space that is only used by a given image.
+	UniqueSize int64
+	// Sum of shared and unique size.
+	Size int64
+}
+
+// DiskUsage calculates the disk usage for each image in the local containers
+// storage. Note that a single image may yield multiple usage reports, one for
+// each repository tag.
+func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, error) {
+	layerTree, err := r.layerTree()
+	if err != nil {
+		return nil, err
+	}
+
+	images, err := r.ListImages(ctx, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var allUsages []ImageDiskUsage
+	for _, image := range images {
+		usages, err := diskUsageForImage(ctx, image, layerTree)
+		if err != nil {
+			return nil, err
+		}
+		allUsages = append(allUsages, usages...)
+	}
+	return allUsages, nil
+}
+
+// diskUsageForImage returns the disk-usage statistics for the specified image.
+func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) ([]ImageDiskUsage, error) {
+	if err := image.isCorrupted(""); err != nil {
+		return nil, err
+	}
+
+	base := ImageDiskUsage{
+		ID:         image.ID(),
+		Created:    image.Created(),
+		Repository: "",
+		Tag:        "",
+	}
+
+	// Shared, unique and total size.
+	parent, err := tree.parent(ctx, image)
+	if err != nil {
+		return nil, err
+	}
+	childIDs, err := tree.children(ctx, image, false)
+	if err != nil {
+		return nil, err
+	}
+
+	// Optimistically set unique size to the full size of the image.
+	size, err := image.Size()
+	if err != nil {
+		return nil, err
+	}
+	base.UniqueSize = size
+
+	if len(childIDs) > 0 {
+		// If we have children, we share everything.
+		base.SharedSize = base.UniqueSize
+		base.UniqueSize = 0
+	} else if parent != nil {
+		// If we have no children but a parent, remove the parent
+		// (shared) size from the unique one.
+		size, err := parent.Size()
+		if err != nil {
+			return nil, err
+		}
+		base.UniqueSize -= size
+		base.SharedSize = size
+	}
+
+	base.Size = base.SharedSize + base.UniqueSize
+
+	// Number of containers using the image.
+	containers, err := image.Containers()
+	if err != nil {
+		return nil, err
+	}
+	base.Containers = len(containers)
+
+	repoTags, err := image.NamedRepoTags()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(repoTags) == 0 {
+		return []ImageDiskUsage{base}, nil
+	}
+
+	pairs, err := ToNameTagPairs(repoTags)
+	if err != nil {
+		return nil, err
+	}
+
+	results := make([]ImageDiskUsage, len(pairs))
+	for i, pair := range pairs {
+		res := base
+		res.Repository = pair.Name
+		res.Tag = pair.Tag
+		results[i] = res
+	}
+
+	return results, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go
new file mode 100644
index 00000000000..c7733564db5
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/events.go
@@ -0,0 +1,62 @@
+package libimage
+
+import (
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// EventType indicates the type of an event. Currently, there is only one
+// supported type, for container images, but we may add more (e.g., for
+// manifest lists) in the future.
+type EventType int
+
+const (
+	// EventTypeUnknown is an uninitialized EventType.
+	EventTypeUnknown EventType = iota
+	// EventTypeImagePull represents an image pull.
+	EventTypeImagePull
+	// EventTypeImagePush represents an image push.
+	EventTypeImagePush
+	// EventTypeImageRemove represents an image removal.
+	EventTypeImageRemove
+	// EventTypeImageLoad represents an image being loaded.
+	EventTypeImageLoad
+	// EventTypeImageSave represents an image being saved.
+	EventTypeImageSave
+	// EventTypeImageTag represents an image being tagged.
+	EventTypeImageTag
+	// EventTypeImageUntag represents an image being untagged.
+	EventTypeImageUntag
+	// EventTypeImageMount represents an image being mounted.
+	EventTypeImageMount
+	// EventTypeImageUnmount represents an image being unmounted.
+	EventTypeImageUnmount
+)
+
+// Event represents an event such as an image pull or an image tag.
+type Event struct {
+	// ID of the object (e.g., image ID).
+	ID string
+	// Name of the object (e.g., image name "quay.io/containers/podman:latest")
+	Name string
+	// Time of the event.
+	Time time.Time
+	// Type of the event.
+	Type EventType
+}
+
+// writeEvent writes the specified event to the Runtime's event channel. The
+// event is discarded if no event channel has been registered (yet).
+func (r *Runtime) writeEvent(event *Event) {
+	select {
+	case r.eventChannel <- event:
+		// Done
+	case <-time.After(2 * time.Second):
+		// The Runtime's event channel has a buffer of size 100 which
+		// should be enough even under high load. However, we
+		// shouldn't block too long in case the buffer runs full (could
+		// be an honest user error or bug).
+		logrus.Warnf("Discarding libimage event which was not read within 2 seconds: %v", event)
+	}
+}
diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go
new file mode 100644
index 00000000000..063f0714914
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/filters.go
@@ -0,0 +1,382 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	filtersPkg "github.com/containers/common/pkg/filters"
+	"github.com/containers/common/pkg/timetype"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// filterFunc is a prototype for a positive image filter. Returning `true`
+// indicates that the image matches the criteria.
+type filterFunc func(*Image) (bool, error)
+
+// Apply the specified filters. At least one filter of each key must apply.
+func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) {
+	matches := false
+	for key := range filters { // and
+		matches = false
+		for _, filter := range filters[key] { // or
+			var err error
+			matches, err = filter(i)
+			if err != nil {
+				// Some images may have been corrupted in the
+				// meantime, so do an extra check and make the
+				// error non-fatal (see containers/podman/issues/12582).
+				if errCorrupted := i.isCorrupted(""); errCorrupted != nil {
+					logrus.Error(errCorrupted.Error())
+					return false, nil
+				}
+				return false, err
+			}
+			if matches {
+				break
+			}
+		}
+		if !matches {
+			return false, nil
+		}
+	}
+	return matches, nil
+}
+
+// filterImages returns a slice of images which are passing all specified
+// filters.
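+// A hedged usage sketch (rt is an assumed, configured *Runtime; ListImages is
+// the public entry point that compiles and applies these filters):
+//
+//	opts := &ListImagesOptions{Filters: []string{"dangling=true", "label=maintainer"}}
+//	images, err := rt.ListImages(ctx, nil, opts)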
+func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *ListImagesOptions) ([]*Image, error) {
+	if len(options.Filters) == 0 || len(images) == 0 {
+		return images, nil
+	}
+
+	filters, err := r.compileImageFilters(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+	result := []*Image{}
+	for i := range images {
+		match, err := images[i].applyFilters(filters)
+		if err != nil {
+			return nil, err
+		}
+		if match {
+			result = append(result, images[i])
+		}
+	}
+	return result, nil
+}
+
+// compileImageFilters creates `filterFunc`s for the specified filters. The
+// required format is `key=value` with the following supported keys: after,
+// before, containers, dangling, id, intermediate, label, manifest, readonly,
+// reference, since, until
+func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) {
+	logrus.Tracef("Parsing image filters %s", options.Filters)
+
+	var tree *layerTree
+	getTree := func() (*layerTree, error) {
+		if tree == nil {
+			t, err := r.layerTree()
+			if err != nil {
+				return nil, err
+			}
+			tree = t
+		}
+		return tree, nil
+	}
+
+	filters := map[string][]filterFunc{}
+	duplicate := map[string]string{}
+	for _, f := range options.Filters {
+		var key, value string
+		var filter filterFunc
+		split := strings.SplitN(f, "=", 2)
+		if len(split) != 2 {
+			return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value")
+		}
+
+		key = split[0]
+		value = split[1]
+		switch key {
+
+		case "after", "since":
+			img, err := r.time(key, value)
+			if err != nil {
+				return nil, err
+			}
+			key = "since"
+			filter = filterAfter(img.Created())
+
+		case "before":
+			img, err := r.time(key, value)
+			if err != nil {
+				return nil, err
+			}
+			filter = filterBefore(img.Created())
+
+		case "containers":
+			if err := r.containers(duplicate, key, value, options.IsExternalContainerFunc); err != nil {
+				return nil, err
+			}
+			filter = filterContainers(value, options.IsExternalContainerFunc)
+
+		case "dangling":
+			dangling, err := r.bool(duplicate, key, value)
+			if err != nil {
+				return nil, err
+			}
+			t, err := getTree()
+			if err != nil {
+				return nil, err
+			}
+
+			filter = filterDangling(ctx, dangling, t)
+
+		case "id":
+			filter = filterID(value)
+
+		case "intermediate":
+			intermediate, err := r.bool(duplicate, key, value)
+			if err != nil {
+				return nil, err
+			}
+			t, err := getTree()
+			if err != nil {
+				return nil, err
+			}
+
+			filter = filterIntermediate(ctx, intermediate, t)
+
+		case "label":
+			filter = filterLabel(ctx, value)
+
+		case "readonly":
+			readOnly, err := r.bool(duplicate, key, value)
+			if err != nil {
+				return nil, err
+			}
+			filter = filterReadOnly(readOnly)
+
+		case "manifest":
+			manifest, err := r.bool(duplicate, key, value)
+			if err != nil {
+				return nil, err
+			}
+			filter = filterManifest(ctx, manifest)
+
+		case "reference":
+			filter = filterReferences(value)
+
+		case "until":
+			until, err := r.until(value)
+			if err != nil {
+				return nil, err
+			}
+			filter = filterBefore(until)
+
+		default:
+			return nil, errors.Errorf("unsupported image filter %q", key)
+		}
+		filters[key] = append(filters[key], filter)
+	}
+
+	return filters, nil
+}
+
+func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error {
+	if exists, ok := duplicate[key]; ok && exists != value {
+		return errors.Errorf("specifying %q filter more than once with different values is not supported", key)
+	}
+	duplicate[key] = value
+	switch value {
+	case "false", "true":
"external": + if externalFunc == nil { + return fmt.Errorf("libimage error: external containers filter without callback") + } + default: + return fmt.Errorf("unsupported value %q for containers filter", value) + } + return nil +} + +func (r *Runtime) until(value string) (time.Time, error) { + var until time.Time + ts, err := timetype.GetTimestamp(value, time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + if err != nil { + return until, err + } + return time.Unix(seconds, nanoseconds), nil +} + +func (r *Runtime) time(key, value string) (*Image, error) { + img, _, err := r.LookupImage(value, nil) + if err != nil { + return nil, errors.Wrapf(err, "could not find local image for filter filter %q=%q", key, value) + } + return img, nil +} + +func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) { + if exists, ok := duplicate[key]; ok && exists != value { + return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + set, err := strconv.ParseBool(value) + if err != nil { + return false, errors.Wrapf(err, "non-boolean value %q for %s filter", key, value) + } + return set, nil +} + +// filterManifest filters whether or not the image is a manifest list +func filterManifest(ctx context.Context, value bool) filterFunc { + return func(img *Image) (bool, error) { + isManifestList, err := img.IsManifestList(ctx) + if err != nil { + return false, err + } + return isManifestList == value, nil + } +} + +// filterReferences creates a reference filter for matching the specified value. +func filterReferences(value string) filterFunc { + return func(img *Image) (bool, error) { + refs, err := img.NamesReferences() + if err != nil { + return false, err + } + + for _, ref := range refs { + refString := ref.String() // FQN with tag/digest + candidates := []string{refString} + + // Split the reference into 3 components (twice if diggested/tagged): + // 1) Fully-qualified reference + // 2) Without domain + // 3) Without domain and path + if named, isNamed := ref.(reference.Named); isNamed { + candidates = append(candidates, + reference.Path(named), // path/name without tag/digest (Path() removes it) + refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest + + trimmedString := reference.TrimNamed(named).String() + if refString != trimmedString { + tagOrDigest := refString[len(trimmedString):] + candidates = append(candidates, + trimmedString, // FQN without tag/digest + reference.Path(named)+tagOrDigest, // path/name with tag/digest + trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest + } + } + + for _, candidate := range candidates { + // path.Match() is also used by Docker's reference.FamiliarMatch(). + matched, _ := path.Match(value, candidate) + if matched { + return true, nil + } + } + } + return false, nil + } +} + +// filterLabel creates a label for matching the specified value. +func filterLabel(ctx context.Context, value string) filterFunc { + return func(img *Image) (bool, error) { + labels, err := img.Labels(ctx) + if err != nil { + return false, err + } + return filtersPkg.MatchLabelFilters([]string{value}, labels), nil + } +} + +// filterAfter creates an after filter for matching the specified value. 
+func filterAfter(value time.Time) filterFunc { + return func(img *Image) (bool, error) { + return img.Created().After(value), nil + } +} + +// filterBefore creates a before filter for matching the specified value. +func filterBefore(value time.Time) filterFunc { + return func(img *Image) (bool, error) { + return img.Created().Before(value), nil + } +} + +// filterReadOnly creates a readonly filter for matching the specified value. +func filterReadOnly(value bool) filterFunc { + return func(img *Image) (bool, error) { + return img.IsReadOnly() == value, nil + } +} + +// filterContainers creates a container filter for matching the specified value. +func filterContainers(value string, fn IsExternalContainerFunc) filterFunc { + return func(img *Image) (bool, error) { + ctrs, err := img.Containers() + if err != nil { + return false, err + } + if value != "external" { + boolValue := value == "true" + return (len(ctrs) > 0) == boolValue, nil + } + + // Check whether all associated containers are external ones. + for _, c := range ctrs { + isExternal, err := fn(c) + if err != nil { + return false, fmt.Errorf("checking if %s is an external container in filter: %w", c, err) + } + if !isExternal { + return isExternal, nil + } + } + return true, nil + } +} + +// filterDangling creates a dangling filter for matching the specified value. +func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc { + return func(img *Image) (bool, error) { + isDangling, err := img.isDangling(ctx, tree) + if err != nil { + return false, err + } + return isDangling == value, nil + } +} + +// filterID creates an image-ID filter for matching the specified value. +func filterID(value string) filterFunc { + return func(img *Image) (bool, error) { + return img.ID() == value, nil + } +} + +// filterIntermediate creates an intermediate filter for images. An image is +// considered to be an intermediate image if it is dangling (i.e., no tags) and +// has no children (i.e., no other image depends on it). +func filterIntermediate(ctx context.Context, value bool, tree *layerTree) filterFunc { + return func(img *Image) (bool, error) { + isIntermediate, err := img.isIntermediate(ctx, tree) + if err != nil { + return false, err + } + return isIntermediate == value, nil + } +} diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go new file mode 100644 index 00000000000..b63fe696bc5 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/history.go @@ -0,0 +1,80 @@ +package libimage + +import ( + "context" + "time" + + "github.com/containers/storage" +) + +// ImageHistory contains the history information of an image. +type ImageHistory struct { + ID string `json:"id"` + Created *time.Time `json:"created"` + CreatedBy string `json:"createdBy"` + Size int64 `json:"size"` + Comment string `json:"comment"` + Tags []string `json:"tags"` +} + +// History computes the image history of the image including all of its parents. 
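+// A minimal caller sketch (img is an assumed *Image; the fields shown exist
+// on ImageHistory above):
+//
+//	entries, err := img.History(ctx)
+//	for _, h := range entries {
+//		fmt.Println(h.ID, h.CreatedBy, h.Size)
+//	}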
+func (i *Image) History(ctx context.Context) ([]ImageHistory, error) {
+	ociImage, err := i.toOCI(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	layerTree, err := i.runtime.layerTree()
+	if err != nil {
+		return nil, err
+	}
+
+	var allHistory []ImageHistory
+	var layer *storage.Layer
+	if i.TopLayer() != "" {
+		layer, err = i.runtime.store.Layer(i.TopLayer())
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Iterate in reverse order over the history entries, look up the
+	// corresponding image ID and size, and get the next layer if needed.
+	numHistories := len(ociImage.History) - 1
+	usedIDs := make(map[string]bool) // prevents assigning image IDs more than once
+	for x := numHistories; x >= 0; x-- {
+		history := ImageHistory{
+			ID:        "", // may be overridden below
+			Created:   ociImage.History[x].Created,
+			CreatedBy: ociImage.History[x].CreatedBy,
+			Comment:   ociImage.History[x].Comment,
+		}
+
+		if layer != nil {
+			history.Tags = layer.Names
+			if !ociImage.History[x].EmptyLayer {
+				history.Size = layer.UncompressedSize
+			}
+			// Query the layer tree if it's the top layer of an
+			// image.
+			node := layerTree.node(layer.ID)
+			if len(node.images) > 0 {
+				id := node.images[0].ID() // always use the first one
+				if _, used := usedIDs[id]; !used {
+					history.ID = id
+					usedIDs[id] = true
+				}
+			}
+			if layer.Parent != "" && !ociImage.History[x].EmptyLayer {
+				layer, err = i.runtime.store.Layer(layer.Parent)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+
+		allHistory = append(allHistory, history)
+	}
+
+	return allHistory, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
new file mode 100644
index 00000000000..661ca159b48
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -0,0 +1,949 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	storageTransport "github.com/containers/image/v5/storage"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/hashicorp/go-multierror"
+	"github.com/opencontainers/go-digest"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// Image represents an image in the containers storage and allows for further
+// operations and data manipulation.
+type Image struct {
+	// Backwards pointer to the runtime.
+	runtime *Runtime
+
+	// Counterpart in the local containers storage.
+	storageImage *storage.Image
+
+	// Image reference to the containers storage.
+	storageReference types.ImageReference
+
+	// All fields in the below structure are cached. They may be cleared
+	// at any time. When adding a new field, please make sure to clear
+	// it in `(*Image).reload()`.
+	cached struct {
+		// Image source. Cached for performance reasons.
+		imageSource types.ImageSource
+		// Inspect data we get from containers/image.
+		partialInspectData *types.ImageInspectInfo
+		// Fully assembled image data.
+		completeInspectData *ImageData
+		// Corresponding OCI image.
+		ociv1Image *ociv1.Image
+		// Names() parsed into references.
+		namesReferences []reference.Reference
+	}
+}
+
+// reload the image and pessimistically clear all cached data.
+func (i *Image) reload() error {
+	logrus.Tracef("Reloading image %s", i.ID())
+	img, err := i.runtime.store.Image(i.ID())
+	if err != nil {
+		return errors.Wrap(err, "reloading image")
+	}
+	i.storageImage = img
+	i.cached.imageSource = nil
+	i.cached.partialInspectData = nil
+	i.cached.completeInspectData = nil
+	i.cached.ociv1Image = nil
+	i.cached.namesReferences = nil
+	return nil
+}
+
+// isCorrupted returns an error if the image may be corrupted.
+func (i *Image) isCorrupted(name string) error {
+	// If it's a manifest list, we're good for now.
+	if _, err := i.getManifestList(); err == nil {
+		return nil
+	}
+
+	ref, err := i.StorageReference()
+	if err != nil {
+		return err
+	}
+
+	if _, err := ref.NewImage(context.Background(), nil); err != nil {
+		if name == "" {
+			name = i.ID()[:12]
+		}
+		return errors.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err)
+	}
+	return nil
+}
+
+// Names returns the names associated with the image, which may be a mix of
+// tags and digests.
+func (i *Image) Names() []string {
+	return i.storageImage.Names
+}
+
+// NamesReferences returns Names() as references.
+func (i *Image) NamesReferences() ([]reference.Reference, error) {
+	if i.cached.namesReferences != nil {
+		return i.cached.namesReferences, nil
+	}
+	refs := make([]reference.Reference, 0, len(i.Names()))
+	for _, name := range i.Names() {
+		ref, err := reference.Parse(name)
+		if err != nil {
+			return nil, err
+		}
+		refs = append(refs, ref)
+	}
+	i.cached.namesReferences = refs
+	return refs, nil
+}
+
+// StorageImage returns the underlying storage.Image.
+func (i *Image) StorageImage() *storage.Image {
+	return i.storageImage
+}
+
+// NamesHistory returns a string array of names previously associated with the
+// image, which may be a mixture of tags and digests.
+func (i *Image) NamesHistory() []string {
+	return i.storageImage.NamesHistory
+}
+
+// ID returns the ID of the image.
+func (i *Image) ID() string {
+	return i.storageImage.ID
+}
+
+// Digest is a digest value that we can use to locate the image, if one was
+// specified at creation-time. Typically it is the digest of one among
+// possibly many digests that we have stored for the image, so many
+// applications are better off using the entire list returned by Digests().
+func (i *Image) Digest() digest.Digest {
+	return i.storageImage.Digest
+}
+
+// Digests is a list of digest values of the image's manifests, and possibly a
+// manually-specified value, that we can use to locate the image. If Digest is
+// set, its value is also in this list.
+func (i *Image) Digests() []digest.Digest {
+	return i.storageImage.Digests
+}
+
+// IsReadOnly returns whether the image is set read only.
+func (i *Image) IsReadOnly() bool {
+	return i.storageImage.ReadOnly
+}
+
+// IsDangling returns true if the image is dangling, that is an untagged image
+// without children.
+func (i *Image) IsDangling(ctx context.Context) (bool, error) {
+	return i.isDangling(ctx, nil)
+}
+
+// isDangling returns true if the image is dangling, that is an untagged image
+// without children. If tree is nil, it will be created for this invocation
+// only.
+func (i *Image) isDangling(ctx context.Context, tree *layerTree) (bool, error) {
+	if len(i.Names()) > 0 {
+		return false, nil
+	}
+	children, err := i.getChildren(ctx, false, tree)
+	if err != nil {
+		return false, err
+	}
+	return len(children) == 0, nil
+}
+
+// IsIntermediate returns true if the image is an intermediate image, that is
+// an untagged image with children.
+func (i *Image) IsIntermediate(ctx context.Context) (bool, error) {
+	return i.isIntermediate(ctx, nil)
+}
+
+// isIntermediate returns true if the image is an intermediate image, that is
+// an untagged image with children. If tree is nil, it will be created for
+// this invocation only.
+func (i *Image) isIntermediate(ctx context.Context, tree *layerTree) (bool, error) {
+	if len(i.Names()) > 0 {
+		return false, nil
+	}
+	children, err := i.getChildren(ctx, false, tree)
+	if err != nil {
+		return false, err
+	}
+	return len(children) != 0, nil
+}
+
+// Created returns the time the image was created.
+func (i *Image) Created() time.Time {
+	return i.storageImage.Created
+}
+
+// Labels returns the labels of the image.
+func (i *Image) Labels(ctx context.Context) (map[string]string, error) {
+	data, err := i.inspectInfo(ctx)
+	if err != nil {
+		isManifestList, listErr := i.IsManifestList(ctx)
+		if listErr != nil {
+			err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", listErr)
+		} else if isManifestList {
+			logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID())
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return data.Labels, nil
+}
+
+// TopLayer returns the top layer id as a string
+func (i *Image) TopLayer() string {
+	return i.storageImage.TopLayer
+}
+
+// Parent returns the parent image or nil if there is none
+func (i *Image) Parent(ctx context.Context) (*Image, error) {
+	tree, err := i.runtime.layerTree()
+	if err != nil {
+		return nil, err
+	}
+	return tree.parent(ctx, i)
+}
+
+// HasChildren reports whether the image has children.
+func (i *Image) HasChildren(ctx context.Context) (bool, error) {
+	children, err := i.getChildren(ctx, false, nil)
+	if err != nil {
+		return false, err
+	}
+	return len(children) > 0, nil
+}
+
+// Children returns the image's children.
+func (i *Image) Children(ctx context.Context) ([]*Image, error) {
+	children, err := i.getChildren(ctx, true, nil)
+	if err != nil {
+		return nil, err
+	}
+	return children, nil
+}
+
+// getChildren returns a list of imageIDs that depend on the image. If all is
+// false, only the first child image is returned. If tree is nil, it will be
+// created for this invocation only.
+func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) {
+	if tree == nil {
+		t, err := i.runtime.layerTree()
+		if err != nil {
+			return nil, err
+		}
+		tree = t
+	}
+	return tree.children(ctx, i, all)
+}
+
+// Containers returns a list of containers using the image.
+func (i *Image) Containers() ([]string, error) {
+	var containerIDs []string
+	containers, err := i.runtime.store.Containers()
+	if err != nil {
+		return nil, err
+	}
+	imageID := i.ID()
+	for i := range containers {
+		if containers[i].ImageID == imageID {
+			containerIDs = append(containerIDs, containers[i].ID)
+		}
+	}
+	return containerIDs, nil
+}
+
+// removeContainers removes all containers using the image.
+func (i *Image) removeContainers(options *RemoveImagesOptions) error {
+	if !options.Force && !options.ExternalContainers {
+		// Nothing to do.
+		return nil
+	}
+
+	if options.Force && options.RemoveContainerFunc != nil {
+		logrus.Debugf("Removing containers of image %s with custom removal function", i.ID())
+		if err := options.RemoveContainerFunc(i.ID()); err != nil {
+			return err
+		}
+	}
+
+	containers, err := i.Containers()
+	if err != nil {
+		return err
+	}
+
+	if !options.Force && options.ExternalContainers {
+		// All containers must be external ones.
+		for _, cID := range containers {
+			isExternal, err := options.IsExternalContainerFunc(cID)
+			if err != nil {
+				return fmt.Errorf("checking if %s is an external container: %w", cID, err)
+			}
+			if !isExternal {
+				return fmt.Errorf("cannot remove container %s: not an external container", cID)
+			}
+		}
+	}
+
+	logrus.Debugf("Removing containers of image %s from the local containers storage", i.ID())
+	var multiE error
+	for _, cID := range containers {
+		if err := i.runtime.store.DeleteContainer(cID); err != nil {
+			// If the container does not exist anymore, we're good.
+			if errors.Cause(err) != storage.ErrContainerUnknown {
+				multiE = multierror.Append(multiE, err)
+			}
+		}
+	}
+
+	return multiE
+}
+
+// RemoveContainerFunc allows for customizing the removal of containers using
+// an image specified by imageID.
+type RemoveContainerFunc func(imageID string) error
+
+// RemoveImageReport is the assembled data from removing *one* image.
+type RemoveImageReport struct {
+	// ID of the image.
+	ID string
+	// Image was removed.
+	Removed bool
+	// Size of the removed image. Only set when explicitly requested in
+	// RemoveImagesOptions.
+	Size int64
+	// The untagged tags.
+	Untagged []string
+}
+
+// remove removes the image along with all dangling parent images that no
+// other image depends on. The image must not be read-only and must not be
+// used by containers. Returns the IDs of removed/untagged images in order.
+//
+// If the image is used by containers, storage.ErrImageUsedByContainer is
+// returned. Use force to remove these containers.
+//
+// NOTE: the rmMap is used to assemble image-removal data across multiple
+// invocations of this function. The recursive nature requires some
+// bookkeeping to make sure that all data is aggregated correctly.
+//
+// This function is internal. Users of libimage should always use
+// `(*Runtime).RemoveImages()`.
+func (i *Image) remove(ctx context.Context, rmMap map[string]*RemoveImageReport, referencedBy string, options *RemoveImagesOptions) ([]string, error) {
+	processedIDs := []string{}
+	return i.removeRecursive(ctx, rmMap, processedIDs, referencedBy, options)
+}
+
+func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveImageReport, processedIDs []string, referencedBy string, options *RemoveImagesOptions) ([]string, error) {
+	// If referencedBy is empty, the image is considered to be removed via
+	// `image remove --all` which alters the logic below.
+
+	// The removal logic below is complex. There is a number of rules
+	// inherited from Podman and Buildah (and Docker). This function
+	// should be the *only* place to extend the removal logic so we keep it
+	// sealed in one place. Make sure to add verbose comments to leave
+	// some breadcrumbs for future readers.
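+	//
+	// In short: figure out how the image was referenced (ID, digest, or
+	// tag), untag rather than remove while other tags remain, remove
+	// containers when forced, delete the image, and finally recurse into a
+	// parent that became dangling, recording every step in rmMap.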
+ logrus.Debugf("Removing image %s", i.ID()) + + if i.IsReadOnly() { + return processedIDs, errors.Errorf("cannot remove read-only image %q", i.ID()) + } + + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: referencedBy, Time: time.Now(), Type: EventTypeImageRemove}) + } + + // Check if already visisted this image. + report, exists := rmMap[i.ID()] + if exists { + // If the image has already been removed, we're done. + if report.Removed { + return processedIDs, nil + } + } else { + report = &RemoveImageReport{ID: i.ID()} + rmMap[i.ID()] = report + } + + // The image may have already been (partially) removed, so we need to + // have a closer look at the errors. On top, image removal should be + // tolerant toward corrupted images. + handleError := func(err error) error { + switch errors.Cause(err) { + case storage.ErrImageUnknown, storage.ErrNotAnImage, storage.ErrLayerUnknown: + // The image or layers of the image may already + // have been removed in which case we consider + // the image to be removed. + return nil + default: + return err + } + } + + // Calculate the size if requested. `podman-image-prune` likes to + // report the regained size. + if options.WithSize { + size, err := i.Size() + if handleError(err) != nil { + return processedIDs, err + } + report.Size = size + } + + skipRemove := false + numNames := len(i.Names()) + + // NOTE: the `numNames == 1` check is not only a performance + // optimization but also preserves exiting Podman/Docker behaviour. + // If image "foo" is used by a container and has only this tag/name, + // an `rmi foo` will not untag "foo" but instead attempt to remove the + // entire image. If there's a container using "foo", we should get an + // error. + if referencedBy == "" || numNames == 1 { + // DO NOTHING, the image will be removed + } else { + byID := strings.HasPrefix(i.ID(), referencedBy) + byDigest := strings.HasPrefix(referencedBy, "sha256:") + if !options.Force { + if byID && numNames > 1 { + return processedIDs, errors.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names()) + } else if byDigest && numNames > 1 { + // FIXME - Docker will remove the digest but containers storage + // does not support that yet, so our hands are tied. + return processedIDs, errors.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names()) + } + } + + // Only try to untag if we know it's not an ID or digest. + if !byID && !byDigest { + if err := i.Untag(referencedBy); handleError(err) != nil { + return processedIDs, err + } + report.Untagged = append(report.Untagged, referencedBy) + + // If there's still tags left, we cannot delete it. + skipRemove = len(i.Names()) > 0 + } + } + + processedIDs = append(processedIDs, i.ID()) + if skipRemove { + return processedIDs, nil + } + + // Perform the container removal, if needed. + if err := i.removeContainers(options); err != nil { + return processedIDs, err + } + + // Podman/Docker compat: we only report an image as removed if it has + // no children. Otherwise, the data is effectively still present in the + // storage despite the image being removed. + hasChildren, err := i.HasChildren(ctx) + if err != nil { + // We must be tolerant toward corrupted images. + // See containers/podman commit fd9dd7065d44. 
+ logrus.Warnf("Failed to determine if an image is a parent: %v, ignoring the error", err) + hasChildren = false + } + + // If there's a dangling parent that no other image depends on, remove + // it recursively. + parent, err := i.Parent(ctx) + if err != nil { + // We must be tolerant toward corrupted images. + // See containers/podman commit fd9dd7065d44. + logrus.Warnf("Failed to determine parent of image: %v, ignoring the error", err) + parent = nil + } + + if _, err := i.runtime.store.DeleteImage(i.ID(), true); handleError(err) != nil { + return processedIDs, err + } + report.Untagged = append(report.Untagged, i.Names()...) + + if !hasChildren { + report.Removed = true + } + + // Check if can remove the parent image. + if parent == nil { + return processedIDs, nil + } + + // Only remove the parent if it's dangling, that is being untagged and + // without children. + danglingParent, err := parent.IsDangling(ctx) + if err != nil { + // See Podman commit fd9dd7065d44: we need to + // be tolerant toward corrupted images. + logrus.Warnf("Failed to determine if an image is a parent: %v, ignoring the error", err) + danglingParent = false + } + if !danglingParent { + return processedIDs, nil + } + + // Recurse into removing the parent. + return parent.removeRecursive(ctx, rmMap, processedIDs, "", options) +} + +var errTagDigest = errors.New("tag by digest not supported") + +// Tag the image with the specified name and store it in the local containers +// storage. The name is normalized according to the rules of NormalizeName. +func (i *Image) Tag(name string) error { + if strings.HasPrefix(name, "sha256:") { // ambiguous input + return errors.Wrap(errTagDigest, name) + } + + ref, err := NormalizeName(name) + if err != nil { + return errors.Wrapf(err, "normalizing name %q", name) + } + + if _, isDigested := ref.(reference.Digested); isDigested { + return errors.Wrap(errTagDigest, name) + } + + logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String()) + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageTag}) + } + + newNames := append(i.Names(), ref.String()) + if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + return err + } + + return i.reload() +} + +// to have some symmetry with the errors from containers/storage. +var errTagUnknown = errors.New("tag not known") + +// TODO (@vrothberg) - `docker rmi sha256:` will remove the digest from the +// image. However, that's something containers storage does not support. +var errUntagDigest = errors.New("untag by digest not supported") + +// Untag the image with the specified name and make the change persistent in +// the local containers storage. The name is normalized according to the rules +// of NormalizeName. +func (i *Image) Untag(name string) error { + if strings.HasPrefix(name, "sha256:") { // ambiguous input + return errors.Wrap(errUntagDigest, name) + } + + ref, err := NormalizeName(name) + if err != nil { + return errors.Wrapf(err, "normalizing name %q", name) + } + + // FIXME: this is breaking Podman CI but must be re-enabled once + // c/storage supports alterting the digests of an image. Then, + // Podman will do the right thing. + // + // !!! Also make sure to re-enable the tests !!! 
+ // + // if _, isDigested := ref.(reference.Digested); isDigested { + // return errors.Wrap(errUntagDigest, name) + // } + + name = ref.String() + + logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID()) + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) + } + + removedName := false + newNames := []string{} + for _, n := range i.Names() { + if n == name { + removedName = true + continue + } + newNames = append(newNames, n) + } + + if !removedName { + return errors.Wrap(errTagUnknown, name) + } + + if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + return err + } + + return i.reload() +} + +// RepoTags returns a string slice of repotags associated with the image. +func (i *Image) RepoTags() ([]string, error) { + namedTagged, err := i.NamedTaggedRepoTags() + if err != nil { + return nil, err + } + repoTags := make([]string, len(namedTagged)) + for i := range namedTagged { + repoTags[i] = namedTagged[i].String() + } + return repoTags, nil +} + +// NamedTaggedRepoTags returns the repotags associated with the image as a +// slice of reference.NamedTagged. +func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) { + var repoTags []reference.NamedTagged + for _, name := range i.Names() { + parsed, err := reference.Parse(name) + if err != nil { + return nil, err + } + named, isNamed := parsed.(reference.Named) + if !isNamed { + continue + } + tagged, isTagged := named.(reference.NamedTagged) + if !isTagged { + continue + } + repoTags = append(repoTags, tagged) + } + return repoTags, nil +} + +// NamedRepoTags returns the repotags associated with the image as a +// slice of reference.Named. +func (i *Image) NamedRepoTags() ([]reference.Named, error) { + var repoTags []reference.Named + for _, name := range i.Names() { + parsed, err := reference.Parse(name) + if err != nil { + return nil, err + } + if named, isNamed := parsed.(reference.Named); isNamed { + repoTags = append(repoTags, named) + } + } + return repoTags, nil +} + +// inRepoTags looks for the specified name/tag pair in the image's repo tags. +func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) { + repoTags, err := i.NamedRepoTags() + if err != nil { + return nil, err + } + + pairs, err := ToNameTagPairs(repoTags) + if err != nil { + return nil, err + } + + name := namedTagged.Name() + tag := namedTagged.Tag() + for _, pair := range pairs { + if tag != pair.Tag { + continue + } + if !strings.HasSuffix(pair.Name, name) { + continue + } + if len(pair.Name) == len(name) { // full match + return pair.named, nil + } + if pair.Name[len(pair.Name)-len(name)-1] == '/' { // matches at repo + return pair.named, nil + } + } + + return nil, nil +} + +// RepoDigests returns a string array of repodigests associated with the image. 
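+// Each entry has the canonical name@digest form, e.g. (illustrative values
+// only):
+//
+//	quay.io/libpod/alpine@sha256:<config-or-manifest-digest>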
+func (i *Image) RepoDigests() ([]string, error) {
+	repoDigests := []string{}
+	added := make(map[string]struct{})
+
+	for _, name := range i.Names() {
+		for _, imageDigest := range append(i.Digests(), i.Digest()) {
+			if imageDigest == "" {
+				continue
+			}
+
+			named, err := reference.ParseNormalizedNamed(name)
+			if err != nil {
+				return nil, err
+			}
+
+			canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest)
+			if err != nil {
+				return nil, err
+			}
+
+			if _, alreadyInList := added[canonical.String()]; !alreadyInList {
+				repoDigests = append(repoDigests, canonical.String())
+				added[canonical.String()] = struct{}{}
+			}
+		}
+	}
+	sort.Strings(repoDigests)
+	return repoDigests, nil
+}
+
+// Mount the image with the specified mount options and label, both of which
+// are directly passed down to the containers storage. Returns the fully
+// evaluated path to the mount point.
+func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel string) (string, error) {
+	if i.runtime.eventChannel != nil {
+		defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: "", Time: time.Now(), Type: EventTypeImageMount})
+	}
+
+	mountPoint, err := i.runtime.store.MountImage(i.ID(), mountOptions, mountLabel)
+	if err != nil {
+		return "", err
+	}
+	mountPoint, err = filepath.EvalSymlinks(mountPoint)
+	if err != nil {
+		return "", err
+	}
+	logrus.Debugf("Mounted image %s at %q", i.ID(), mountPoint)
+	return mountPoint, nil
+}
+
+// Mountpoint returns the path to the image's mount point. The path is empty
+// if the image is not mounted.
+func (i *Image) Mountpoint() (string, error) {
+	mountedTimes, err := i.runtime.store.Mounted(i.TopLayer())
+	if err != nil || mountedTimes == 0 {
+		if errors.Cause(err) == storage.ErrLayerUnknown {
+			// Can happen, Podman did it, but there's no
+			// explanation why.
+			err = nil
+		}
+		return "", err
+	}
+
+	layer, err := i.runtime.store.Layer(i.TopLayer())
+	if err != nil {
+		return "", err
+	}
+
+	mountPoint, err := filepath.EvalSymlinks(layer.MountPoint)
+	if err != nil {
+		return "", err
+	}
+
+	return mountPoint, nil
+}
+
+// Unmount the image. Use force to ignore the reference counter and forcefully
+// unmount.
+func (i *Image) Unmount(force bool) error {
+	if i.runtime.eventChannel != nil {
+		defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: "", Time: time.Now(), Type: EventTypeImageUnmount})
+	}
+	logrus.Debugf("Unmounting image %s", i.ID())
+	_, err := i.runtime.store.UnmountImage(i.ID(), force)
+	return err
+}
+
+// Size computes the size of the image layers and associated data.
+func (i *Image) Size() (int64, error) {
+	return i.runtime.store.ImageSize(i.ID())
+}
+
+// HasDifferentDigestOptions allows for customizing the check if another
+// (remote) image has a different digest.
+type HasDifferentDigestOptions struct {
+	// containers-auth.json(5) file to use when authenticating against
+	// container registries.
+	AuthFilePath string
+}
+
+// HasDifferentDigest returns true if the image specified by `remoteRef` has a
+// different digest than the local one. This check is useful for detecting
+// updates on remote registries.
+func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageReference, options *HasDifferentDigestOptions) (bool, error) {
+	// We need to account for the arch that the image uses. It seems
+	// common on ARM to tweak this option to pull the correct image. See
+	// github.com/containers/podman/issues/6613.
+ inspectInfo, err := i.inspectInfo(ctx) + if err != nil { + return false, err + } + + sys := i.runtime.systemContextCopy() + sys.ArchitectureChoice = inspectInfo.Architecture + // OS and variant may not be set, so let's check to avoid accidental + // overrides of the runtime settings. + if inspectInfo.Os != "" { + sys.OSChoice = inspectInfo.Os + } + if inspectInfo.Variant != "" { + sys.VariantChoice = inspectInfo.Variant + } + + if options != nil && options.AuthFilePath != "" { + sys.AuthFilePath = options.AuthFilePath + } + + return i.hasDifferentDigestWithSystemContext(ctx, remoteRef, sys) +} + +func (i *Image) hasDifferentDigestWithSystemContext(ctx context.Context, remoteRef types.ImageReference, sys *types.SystemContext) (bool, error) { + remoteImg, err := remoteRef.NewImage(ctx, sys) + if err != nil { + return false, err + } + + rawManifest, rawManifestMIMEType, err := remoteImg.Manifest(ctx) + if err != nil { + return false, err + } + + // If the remote ref's manifest is a list, try to zero in on the image + // in the list that we would eventually try to pull. + var remoteDigest digest.Digest + if manifest.MIMETypeIsMultiImage(rawManifestMIMEType) { + list, err := manifest.ListFromBlob(rawManifest, rawManifestMIMEType) + if err != nil { + return false, err + } + remoteDigest, err = list.ChooseInstance(sys) + if err != nil { + return false, err + } + } else { + remoteDigest, err = manifest.Digest(rawManifest) + if err != nil { + return false, err + } + } + + // Check if we already have that image's manifest in this image. A + // single image can have multiple manifests that describe the same + // config blob and layers, so treat any match as a successful match. + for _, digest := range append(i.Digests(), i.Digest()) { + if digest.Validate() != nil { + continue + } + if digest.String() == remoteDigest.String() { + return false, nil + } + } + // No matching digest found in the local image. + return true, nil +} + +// driverData gets the driver data from the store on a layer +func (i *Image) driverData() (*DriverData, error) { + store := i.runtime.store + layerID := i.TopLayer() + driver, err := store.GraphDriver() + if err != nil { + return nil, err + } + metaData, err := driver.Metadata(layerID) + if err != nil { + return nil, err + } + if mountTimes, err := store.Mounted(layerID); mountTimes == 0 || err != nil { + delete(metaData, "MergedDir") + } + return &DriverData{ + Name: driver.String(), + Data: metaData, + }, nil +} + +// StorageReference returns the image's reference to the containers storage +// using the image ID. +func (i *Image) StorageReference() (types.ImageReference, error) { + if i.storageReference != nil { + return i.storageReference, nil + } + ref, err := storageTransport.Transport.ParseStoreReference(i.runtime.store, "@"+i.ID()) + if err != nil { + return nil, err + } + i.storageReference = ref + return ref, nil +} + +// source returns the possibly cached image reference. +func (i *Image) source(ctx context.Context) (types.ImageSource, error) { + if i.cached.imageSource != nil { + return i.cached.imageSource, nil + } + ref, err := i.StorageReference() + if err != nil { + return nil, err + } + src, err := ref.NewImageSource(ctx, i.runtime.systemContextCopy()) + if err != nil { + return nil, err + } + i.cached.imageSource = src + return src, nil +} + +// rawConfigBlob returns the image's config as a raw byte slice. 
Users need to
+// unmarshal it to the corresponding type (OCI, Docker v2s{1,2}).
+func (i *Image) rawConfigBlob(ctx context.Context) ([]byte, error) {
+	ref, err := i.StorageReference()
+	if err != nil {
+		return nil, err
+	}
+
+	imageCloser, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+	if err != nil {
+		return nil, err
+	}
+	defer imageCloser.Close()
+
+	return imageCloser.ConfigBlob(ctx)
+}
+
+// Manifest returns the raw data and the MIME type of the image's manifest.
+func (i *Image) Manifest(ctx context.Context) (rawManifest []byte, mimeType string, err error) {
+	src, err := i.source(ctx)
+	if err != nil {
+		return nil, "", err
+	}
+	return src.GetManifest(ctx, nil)
+}
+
+// getImageID creates an image object and uses the hex value of the config
+// blob's digest (if it has one) as the image ID for parsing the store
+// reference.
+func getImageID(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
+	newImg, err := src.NewImage(ctx, sys)
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		if err := newImg.Close(); err != nil {
+			logrus.Errorf("Failed to close image: %q", err)
+		}
+	}()
+	imageDigest := newImg.ConfigInfo().Digest
+	if err = imageDigest.Validate(); err != nil {
+		return "", errors.Wrapf(err, "getting config info")
+	}
+	return "@" + imageDigest.Encoded(), nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go
new file mode 100644
index 00000000000..14020244050
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image_config.go
@@ -0,0 +1,241 @@
+package libimage
+
+import (
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/containers/common/pkg/signal"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
+// by the OCI image-spec, but containing additional fields that are not supported
+// by OCIv1 (but are by Docker v2) - notably OnBuild.
+type ImageConfig struct {
+	ociv1.ImageConfig
+	OnBuild []string
+}
+
+// ImageConfigFromChanges produces a v1.ImageConfig from the --change flag that
+// is accepted by several Podman commands. It accepts a (limited subset) of
+// Dockerfile instructions.
+// Valid changes are:
+// * USER
+// * EXPOSE
+// * ENV
+// * ENTRYPOINT
+// * CMD
+// * VOLUME
+// * WORKDIR
+// * LABEL
+// * STOPSIGNAL
+// * ONBUILD
+func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:gocyclo
+	config := &ImageConfig{}
+
+	for _, change := range changes {
+		// First, let's assume proper Dockerfile format - space
+		// separator between instruction and value
+		split := strings.SplitN(change, " ", 2)
+
+		if len(split) != 2 {
+			split = strings.SplitN(change, "=", 2)
+			if len(split) != 2 {
+				return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
+			}
+		}
+
+		outerKey := strings.ToUpper(strings.TrimSpace(split[0]))
+		value := strings.TrimSpace(split[1])
+		switch outerKey {
+		case "USER":
+			// Assume literal contents are the user.
+ if value == "" { + return nil, errors.Errorf("invalid change %q - must provide a value to USER", change) + } + config.User = value + case "EXPOSE": + // EXPOSE is either [portnum] or + // [portnum]/[proto] + // Protocol must be "tcp" or "udp" + splitPort := strings.Split(value, "/") + if len(splitPort) > 2 { + return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change) + } + portNum, err := strconv.Atoi(splitPort[0]) + if err != nil { + return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change) + } + if portNum > 65535 || portNum <= 0 { + return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change) + } + proto := "tcp" + if len(splitPort) > 1 { + testProto := strings.ToLower(splitPort[1]) + switch testProto { + case "tcp", "udp": + proto = testProto + default: + return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change) + } + } + if config.ExposedPorts == nil { + config.ExposedPorts = make(map[string]struct{}) + } + config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{} + case "ENV": + // Format is either: + // ENV key=value + // ENV key=value key=value ... + // ENV key value + // Both keys and values can be surrounded by quotes to group them. + // For now: we only support key=value + // We will attempt to strip quotation marks if present. + + var ( + key, val string + ) + + splitEnv := strings.SplitN(value, "=", 2) + key = splitEnv[0] + // We do need a key + if key == "" { + return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change) + } + // Perfectly valid to not have a value + if len(splitEnv) == 2 { + val = splitEnv[1] + } + + if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { + key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) + } + if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) { + val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`) + } + config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val)) + case "ENTRYPOINT": + // Two valid forms. + // First, JSON array. + // Second, not a JSON array - we interpret this as an + // argument to `sh -c`, unless empty, in which case we + // just use a blank entrypoint. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // It ain't valid JSON, so assume it's an + // argument to sh -c if not empty. + if value != "" { + config.Entrypoint = []string{"/bin/sh", "-c", value} + } else { + config.Entrypoint = []string{} + } + } else { + // Valid JSON + config.Entrypoint = testUnmarshal + } + case "CMD": + // Same valid forms as entrypoint. + // However, where ENTRYPOINT assumes that 'ENTRYPOINT ' + // means no entrypoint, CMD assumes it is 'sh -c' with + // no third argument. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // It ain't valid JSON, so assume it's an + // argument to sh -c. + // Only include volume if it's not "" + config.Cmd = []string{"/bin/sh", "-c"} + if value != "" { + config.Cmd = append(config.Cmd, value) + } + } else { + // Valid JSON + config.Cmd = testUnmarshal + } + case "VOLUME": + // Either a JSON array or a set of space-separated + // paths. + // Acts rather similar to ENTRYPOINT and CMD, but always + // appends rather than replacing, and no sh -c prepend. 
+			testUnmarshal := []string{}
+			if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+				// Not valid JSON, so split on spaces
+				testUnmarshal = strings.Split(value, " ")
+			}
+			if len(testUnmarshal) == 0 {
+				return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
+			}
+			for _, vol := range testUnmarshal {
+				if vol == "" {
+					return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
+				}
+				if config.Volumes == nil {
+					config.Volumes = make(map[string]struct{})
+				}
+				config.Volumes[vol] = struct{}{}
+			}
+		case "WORKDIR":
+			// This can be passed multiple times.
+			// Each successive invocation is treated as relative to
+			// the previous one - so WORKDIR /A, WORKDIR b,
+			// WORKDIR c results in /A/b/c
+			// Just need to check it's not empty...
+			if value == "" {
+				return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
+			}
+			config.WorkingDir = filepath.Join(config.WorkingDir, value)
+		case "LABEL":
+			// Same general idea as ENV, but we no longer allow " "
+			// as a separator.
+			// We didn't do that for ENV either, so nice and easy.
+			// Potentially problematic: LABEL might theoretically
+			// allow an = in the key? If people really do this, we
+			// may need to investigate more advanced parsing.
+			var (
+				key, val string
+			)
+
+			splitLabel := strings.SplitN(value, "=", 2)
+			// Unlike ENV, LABEL must have a value
+			if len(splitLabel) != 2 {
+				return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
+			}
+			key = splitLabel[0]
+			val = splitLabel[1]
+
+			if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
+				key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
+			}
+			if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
+				val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
+			}
+			// Check key after we strip quotations
+			if key == "" {
+				return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
+			}
+			if config.Labels == nil {
+				config.Labels = make(map[string]string)
+			}
+			config.Labels[key] = val
+		case "STOPSIGNAL":
+			// Check the provided signal for validity.
+			killSignal, err := signal.ParseSignal(value)
+			if err != nil {
+				return nil, errors.Wrapf(err, "invalid change %q - STOPSIGNAL must be given a valid signal", change)
+			}
+			config.StopSignal = fmt.Sprintf("%d", killSignal)
+		case "ONBUILD":
+			// Onbuild always appends.
+			if value == "" {
+				return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
+			}
+			config.OnBuild = append(config.OnBuild, value)
+		default:
+			return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
+		}
+	}
+
+	return config, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go
new file mode 100644
index 00000000000..d48aeeada34
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image_tree.go
@@ -0,0 +1,117 @@
+package libimage
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/disiqueira/gotree/v3"
+	"github.com/docker/go-units"
+)
+
+// Tree generates a tree for the specified image and its layers. Use
+// `traverseChildren` to traverse the layers of all children. By default, only
+// layers of the image are printed.
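+// A hedged caller sketch (img is an assumed *Image):
+//
+//	out, err := img.Tree(false)
+//	if err == nil {
+//		fmt.Print(out)
+//	}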
+func (i *Image) Tree(traverseChildren bool) (string, error) {
+	// NOTE: a string builder prevents us from copying too much data
+	// around; we compile the string when and where needed.
+	sb := &strings.Builder{}
+
+	// First print the pretty header for the target image.
+	size, err := i.Size()
+	if err != nil {
+		return "", err
+	}
+	repoTags, err := i.RepoTags()
+	if err != nil {
+		return "", err
+	}
+
+	fmt.Fprintf(sb, "Image ID: %s\n", i.ID()[:12])
+	fmt.Fprintf(sb, "Tags: %s\n", repoTags)
+	fmt.Fprintf(sb, "Size: %v\n", units.HumanSizeWithPrecision(float64(size), 4))
+	if i.TopLayer() != "" {
+		fmt.Fprintf(sb, "Image Layers")
+	} else {
+		fmt.Fprintf(sb, "No Image Layers")
+	}
+
+	layerTree, err := i.runtime.layerTree()
+	if err != nil {
+		return "", err
+	}
+	imageNode := layerTree.node(i.TopLayer())
+
+	// Traverse the entire tree down to all children.
+	if traverseChildren {
+		tree := gotree.New(sb.String())
+		if err := imageTreeTraverseChildren(imageNode, tree); err != nil {
+			return "", err
+		}
+		return tree.Print(), nil
+	}
+
+	// Walk all layers of the image and assemble their data. Note that the
+	// tree is constructed in reverse order to remain backwards compatible
+	// with Podman.
+	contents := []string{}
+	for parentNode := imageNode; parentNode != nil; parentNode = parentNode.parent {
+		if parentNode.layer == nil {
+			break // we're done
+		}
+		var tags string
+		repoTags, err := parentNode.repoTags()
+		if err != nil {
+			return "", err
+		}
+		if len(repoTags) > 0 {
+			tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
+		}
+		content := fmt.Sprintf("ID: %s Size: %7v%s", parentNode.layer.ID[:12], units.HumanSizeWithPrecision(float64(parentNode.layer.UncompressedSize), 4), tags)
+		contents = append(contents, content)
+	}
+	contents = append(contents, sb.String())
+
+	tree := gotree.New(contents[len(contents)-1])
+	for i := len(contents) - 2; i >= 0; i-- {
+		tree.Add(contents[i])
+	}
+
+	return tree.Print(), nil
+}
+
+func imageTreeTraverseChildren(node *layerNode, parent gotree.Tree) error {
+	if node.layer == nil {
+		return nil
+	}
+
+	var tags string
+	repoTags, err := node.repoTags()
+	if err != nil {
+		return err
+	}
+	if len(repoTags) > 0 {
+		tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
+	}
+
+	content := fmt.Sprintf("ID: %s Size: %7v%s", node.layer.ID[:12], units.HumanSizeWithPrecision(float64(node.layer.UncompressedSize), 4), tags)
+
+	var newTree gotree.Tree
+	if node.parent == nil || len(node.parent.children) <= 1 {
+		// No parent or no siblings, so we can go linear.
+		parent.Add(content)
+		newTree = parent
+	} else {
+		// Each sibling gets a new tree, so we can branch.
+ newTree = gotree.New(content) + parent.AddTree(newTree) + } + + for i := range node.children { + child := node.children[i] + if err := imageTreeTraverseChildren(child, newTree); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go new file mode 100644 index 00000000000..67ab654b2ca --- /dev/null +++ b/vendor/github.com/containers/common/libimage/import.go @@ -0,0 +1,127 @@ +package libimage + +import ( + "context" + "fmt" + "net/url" + "os" + + "github.com/containers/common/pkg/download" + storageTransport "github.com/containers/image/v5/storage" + tarballTransport "github.com/containers/image/v5/tarball" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ImportOptions allow for customizing image imports. +type ImportOptions struct { + CopyOptions + + // Apply the specified changes to the created image. Please refer to + // `ImageConfigFromChanges` for supported change instructions. + Changes []string + // Set the commit message as a comment to created image's history. + CommitMessage string + // Tag the imported image with this value. + Tag string + // Overwrite OS of imported image. + OS string + // Overwrite Arch of imported image. + Arch string +} + +// Import imports a custom tarball at the specified path. Returns the name of +// the imported image. +func (r *Runtime) Import(ctx context.Context, path string, options *ImportOptions) (string, error) { + logrus.Debugf("Importing image from %q", path) + + if options == nil { + options = &ImportOptions{} + } + + ic := v1.ImageConfig{} + if len(options.Changes) > 0 { + config, err := ImageConfigFromChanges(options.Changes) + if err != nil { + return "", err + } + ic = config.ImageConfig + } + + hist := []v1.History{ + {Comment: options.CommitMessage}, + } + + config := v1.Image{ + Config: ic, + History: hist, + OS: options.OS, + Architecture: options.Arch, + } + + u, err := url.ParseRequestURI(path) + if err == nil && u.Scheme != "" { + // If source is a URL, download the file. + fmt.Printf("Downloading from %q\n", path) + file, err := download.FromURL(r.systemContext.BigFilesTemporaryDir, path) + if err != nil { + return "", err + } + defer os.Remove(file) + path = file + } else if path == "-" { + // "-" special cases stdin + path = os.Stdin.Name() + } + + srcRef, err := tarballTransport.Transport.ParseReference(path) + if err != nil { + return "", err + } + + updater, ok := srcRef.(tarballTransport.ConfigUpdater) + if !ok { + return "", errors.New("unexpected type, a tarball reference should implement tarball.ConfigUpdater") + } + annotations := make(map[string]string) + if err := updater.ConfigUpdate(config, annotations); err != nil { + return "", err + } + + id, err := getImageID(ctx, srcRef, r.systemContextCopy()) + if err != nil { + return "", err + } + + destRef, err := storageTransport.Transport.ParseStoreReference(r.store, id) + if err != nil { + return "", err + } + + c, err := r.newCopier(&options.CopyOptions) + if err != nil { + return "", err + } + defer c.close() + + if _, err := c.copy(ctx, srcRef, destRef); err != nil { + return "", err + } + + // Strip the leading @ off the id. + name := id[1:] + + // If requested, tag the imported image. 
+	if options.Tag != "" {
+		image, _, err := r.LookupImage(name, nil)
+		if err != nil {
+			return "", errors.Wrap(err, "looking up imported image")
+		}
+		if err := image.Tag(options.Tag); err != nil {
+			return "", err
+		}
+	}
+
+	return "sha256:" + name, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go
new file mode 100644
index 00000000000..d44ebf46ef9
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/inspect.go
@@ -0,0 +1,233 @@
+package libimage
+
+import (
+	"context"
+	"encoding/json"
+	"time"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// ImageData contains the inspected data of an image.
+type ImageData struct {
+	ID           string                        `json:"Id"`
+	Digest       digest.Digest                 `json:"Digest"`
+	RepoTags     []string                      `json:"RepoTags"`
+	RepoDigests  []string                      `json:"RepoDigests"`
+	Parent       string                        `json:"Parent"`
+	Comment      string                        `json:"Comment"`
+	Created      *time.Time                    `json:"Created"`
+	Config       *ociv1.ImageConfig            `json:"Config"`
+	Version      string                        `json:"Version"`
+	Author       string                        `json:"Author"`
+	Architecture string                        `json:"Architecture"`
+	Os           string                        `json:"Os"`
+	Size         int64                         `json:"Size"`
+	VirtualSize  int64                         `json:"VirtualSize"`
+	GraphDriver  *DriverData                   `json:"GraphDriver"`
+	RootFS       *RootFS                       `json:"RootFS"`
+	Labels       map[string]string             `json:"Labels"`
+	Annotations  map[string]string             `json:"Annotations"`
+	ManifestType string                        `json:"ManifestType"`
+	User         string                        `json:"User"`
+	History      []ociv1.History               `json:"History"`
+	NamesHistory []string                      `json:"NamesHistory"`
+	HealthCheck  *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"`
+}
+
+// DriverData includes data on the storage driver of the image.
+type DriverData struct {
+	Name string            `json:"Name"`
+	Data map[string]string `json:"Data"`
+}
+
+// RootFS includes data on the root filesystem of the image.
+type RootFS struct {
+	Type   string          `json:"Type"`
+	Layers []digest.Digest `json:"Layers"`
+}
+
+// InspectOptions allows for customizing image inspection.
+type InspectOptions struct {
+	// Compute the size of the image (expensive).
+	WithSize bool
+	// Compute the parent of the image (expensive).
+	WithParent bool
+}
+
+// Inspect inspects the image.
+func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageData, error) {
+	logrus.Debugf("Inspecting image %s", i.ID())
+
+	if options == nil {
+		options = &InspectOptions{}
+	}
+
+	if i.cached.completeInspectData != nil {
+		if options.WithSize && i.cached.completeInspectData.Size == int64(-1) {
+			size, err := i.Size()
+			if err != nil {
+				return nil, err
+			}
+			i.cached.completeInspectData.Size = size
+		}
+		if options.WithParent && i.cached.completeInspectData.Parent == "" {
+			parentImage, err := i.Parent(ctx)
+			if err != nil {
+				return nil, err
+			}
+			if parentImage != nil {
+				i.cached.completeInspectData.Parent = parentImage.ID()
+			}
+		}
+		return i.cached.completeInspectData, nil
+	}
+
+	// First assemble data that does not depend on the format of the image.
+ info, err := i.inspectInfo(ctx) + if err != nil { + return nil, err + } + ociImage, err := i.toOCI(ctx) + if err != nil { + return nil, err + } + + repoTags, err := i.RepoTags() + if err != nil { + return nil, err + } + repoDigests, err := i.RepoDigests() + if err != nil { + return nil, err + } + driverData, err := i.driverData() + if err != nil { + return nil, err + } + + size := int64(-1) + if options.WithSize { + size, err = i.Size() + if err != nil { + return nil, err + } + } + + data := &ImageData{ + ID: i.ID(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Created: ociImage.Created, + Author: ociImage.Author, + Architecture: ociImage.Architecture, + Os: ociImage.OS, + Config: &ociImage.Config, + Version: info.DockerVersion, + Size: size, + VirtualSize: size, // TODO: they should be different (inherited from Podman) + Digest: i.Digest(), + Labels: info.Labels, + RootFS: &RootFS{ + Type: ociImage.RootFS.Type, + Layers: ociImage.RootFS.DiffIDs, + }, + GraphDriver: driverData, + User: ociImage.Config.User, + History: ociImage.History, + NamesHistory: i.NamesHistory(), + } + + if options.WithParent { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + data.Parent = parentImage.ID() + } + } + + // Determine the format of the image. How we determine certain data + // depends on the format (e.g., Docker v2s2, OCI v1). + src, err := i.source(ctx) + if err != nil { + return nil, err + } + manifestRaw, manifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return nil, err + } + + data.ManifestType = manifestType + + switch manifestType { + // OCI image + case ociv1.MediaTypeImageManifest: + var ociManifest ociv1.Manifest + if err := json.Unmarshal(manifestRaw, &ociManifest); err != nil { + return nil, err + } + data.Annotations = ociManifest.Annotations + if len(ociImage.History) > 0 { + data.Comment = ociImage.History[0].Comment + } + + // Docker image + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema2MediaType: + rawConfig, err := i.rawConfigBlob(ctx) + if err != nil { + return nil, err + } + var dockerManifest manifest.Schema2V1Image + if err := json.Unmarshal(rawConfig, &dockerManifest); err != nil { + return nil, err + } + data.Comment = dockerManifest.Comment + // NOTE: Health checks may be listed in the container config or + // the config. + data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck + if data.HealthCheck == nil { + data.HealthCheck = dockerManifest.Config.Healthcheck + } + } + + if data.Annotations == nil { + // Podman compat + data.Annotations = make(map[string]string) + } + + i.cached.completeInspectData = data + + return data, nil +} + +// inspectInfo returns the image inspect info. 
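+// The result is cached on the image, so repeated calls are cheap.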
+func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error) {
+	if i.cached.partialInspectData != nil {
+		return i.cached.partialInspectData, nil
+	}
+
+	ref, err := i.StorageReference()
+	if err != nil {
+		return nil, err
+	}
+
+	img, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+	if err != nil {
+		return nil, err
+	}
+	defer img.Close()
+
+	data, err := img.Inspect(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	i.cached.partialInspectData = data
+	return data, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go
new file mode 100644
index 00000000000..05f21531b0a
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/layer_tree.go
@@ -0,0 +1,299 @@
+package libimage
+
+import (
+	"context"
+
+	"github.com/containers/storage"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// layerTree is an internal representation of local layers.
+type layerTree struct {
+	// nodes is the actual layer tree with layer IDs being keys.
+	nodes map[string]*layerNode
+	// ociCache is a cache for Image.ID -> OCI Image. Translations are done
+	// on-demand.
+	ociCache map[string]*ociv1.Image
+	// emptyImages do not have any top-layer so we cannot create a
+	// *layerNode for them.
+	emptyImages []*Image
+}
+
+// node returns a layerNode for the specified layerID.
+func (t *layerTree) node(layerID string) *layerNode {
+	node, exists := t.nodes[layerID]
+	if !exists {
+		node = &layerNode{}
+		t.nodes[layerID] = node
+	}
+	return node
+}
+
+// toOCI returns an OCI image for the specified image.
+func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) {
+	var err error
+	oci, exists := t.ociCache[i.ID()]
+	if !exists {
+		oci, err = i.toOCI(ctx)
+		if err == nil {
+			t.ociCache[i.ID()] = oci
+		}
+	}
+	return oci, err
+}
+
+// layerNode is a node in a layerTree. Its ID is the key in a layerTree.
+type layerNode struct {
+	children []*layerNode
+	images   []*Image
+	parent   *layerNode
+	layer    *storage.Layer
+}
+
+// repoTags assembles the repo tags of all images of the layer node.
+func (l *layerNode) repoTags() ([]string, error) {
+	orderedTags := []string{}
+	visitedTags := make(map[string]bool)
+
+	for _, image := range l.images {
+		repoTags, err := image.RepoTags()
+		if err != nil {
+			return nil, err
+		}
+		for _, tag := range repoTags {
+			if _, visited := visitedTags[tag]; visited {
+				continue
+			}
+			visitedTags[tag] = true
+			orderedTags = append(orderedTags, tag)
+		}
+	}
+
+	return orderedTags, nil
+}
+
+// layerTree extracts a layerTree from the layers in the local storage and
+// relates them to the specified images.
+func (r *Runtime) layerTree() (*layerTree, error) {
+	layers, err := r.store.Layers()
+	if err != nil {
+		return nil, err
+	}
+
+	images, err := r.ListImages(context.Background(), nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	tree := layerTree{
+		nodes:    make(map[string]*layerNode),
+		ociCache: make(map[string]*ociv1.Image),
+	}
+
+	// First build a tree purely based on layer information.
+	for i := range layers {
+		node := tree.node(layers[i].ID)
+		node.layer = &layers[i]
+		if layers[i].Parent == "" {
+			continue
+		}
+		parent := tree.node(layers[i].Parent)
+		node.parent = parent
+		parent.children = append(parent.children, node)
+	}
+
+	// Now assign the images to each (top) layer.
+	for i := range images {
+		img := images[i] // do not leak loop variable outside the scope
+		topLayer := img.TopLayer()
+		if topLayer == "" {
+			tree.emptyImages = append(tree.emptyImages, img)
+			continue
+		}
+		node, exists := tree.nodes[topLayer]
+		if !exists {
+			// Note: erroring out in this case has turned out to be
+			// a mistake. Users may not be able to recover, so we
+			// now log a warning to guide them to resolve the issue
+			// and treat the error as non-fatal.
+			logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", topLayer, img.ID())
+			continue
+		}
+		node.images = append(node.images, img)
+	}
+
+	return &tree, nil
+}
+
+// children returns the child images of parent. Child images are images with
+// either the same top layer as parent or parent being the true parent layer.
+// Furthermore, the history of the parent and child images must match with the
+// parent having one history item less. If all is true, all images are
+// returned. Otherwise, the first image is returned. Note that manifest lists
+// do not have children.
+func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*Image, error) {
+	if parent.TopLayer() == "" {
+		if isManifestList, _ := parent.IsManifestList(ctx); isManifestList {
+			return nil, nil
+		}
+	}
+
+	parentID := parent.ID()
+	parentOCI, err := t.toOCI(ctx, parent)
+	if err != nil {
+		return nil, err
+	}
+
+	// checkParent returns true if the specified image is a child of parent.
+	checkParent := func(child *Image) (bool, error) {
+		if parentID == child.ID() {
+			return false, nil
+		}
+		childOCI, err := t.toOCI(ctx, child)
+		if err != nil {
+			return false, err
+		}
+		// History check.
+		return areParentAndChild(parentOCI, childOCI), nil
+	}
+
+	var children []*Image
+
+	// Empty images are special in that they do not have any physical layer
+	// yet can still have a parent-child relation. Hence, compare the
+	// "parent" image to all other known empty images.
+	if parent.TopLayer() == "" {
+		for i := range t.emptyImages {
+			empty := t.emptyImages[i]
+			isParent, err := checkParent(empty)
+			if err != nil {
+				return nil, err
+			}
+			if isParent {
+				children = append(children, empty)
+				if !all {
+					break
+				}
+			}
+		}
+		return children, nil
+	}
+
+	parentNode, exists := t.nodes[parent.TopLayer()]
+	if !exists {
+		// Note: erroring out in this case has turned out to be a
+		// mistake. Users may not be able to recover, so we now log a
+		// warning to guide them to resolve the issue and treat the
+		// error as non-fatal.
+		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", parent.TopLayer())
+		return children, nil
+	}
+
+	// addChildrenFromNode adds child images of parent to children. Returns
+	// true if any image is a child of parent.
+	addChildrenFromNode := func(node *layerNode) (bool, error) {
+		foundChildren := false
+		for i, childImage := range node.images {
+			isChild, err := checkParent(childImage)
+			if err != nil {
+				return foundChildren, err
+			}
+			if isChild {
+				foundChildren = true
+				children = append(children, node.images[i])
+				if all {
+					return foundChildren, nil
+				}
+			}
+		}
+		return foundChildren, nil
+	}
+
+	// First check images where parent's top layer is also the parent
+	// layer.
+	for _, childNode := range parentNode.children {
+		found, err := addChildrenFromNode(childNode)
+		if err != nil {
+			return nil, err
+		}
+		if found && all {
+			return children, nil
+		}
+	}
+
+	// Now check images with the same top layer.
+	if _, err := addChildrenFromNode(parentNode); err != nil {
+		return nil, err
+	}
+
+	return children, nil
+}
+
+// parent returns the parent image or nil if no parent image could be found.
+// Note that manifest lists do not have parents.
+func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
+	if child.TopLayer() == "" {
+		if isManifestList, _ := child.IsManifestList(ctx); isManifestList {
+			return nil, nil
+		}
+	}
+
+	childID := child.ID()
+	childOCI, err := t.toOCI(ctx, child)
+	if err != nil {
+		return nil, err
+	}
+
+	// Empty images are special in that they do not have any physical layer
+	// yet can still have a parent-child relation. Hence, compare the
+	// "child" image to all other known empty images.
+	if child.TopLayer() == "" {
+		for _, empty := range t.emptyImages {
+			if childID == empty.ID() {
+				continue
+			}
+			emptyOCI, err := t.toOCI(ctx, empty)
+			if err != nil {
+				return nil, err
+			}
+			// History check.
+			if areParentAndChild(emptyOCI, childOCI) {
+				return empty, nil
+			}
+		}
+		return nil, nil
+	}
+
+	node, exists := t.nodes[child.TopLayer()]
+	if !exists {
+		// Note: erroring out in this case has turned out to be a
+		// mistake. Users may not be able to recover, so we now log a
+		// warning to guide them to resolve the issue and treat the
+		// error as non-fatal.
+		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer())
+		return nil, nil
+	}
+
+	// Check images from the parent node (i.e., parent layer) and images
+	// with the same layer (i.e., same top layer).
+	images := node.images
+	if node.parent != nil {
+		images = append(images, node.parent.images...)
+	}
+	for _, parent := range images {
+		if parent.ID() == childID {
+			continue
+		}
+		parentOCI, err := t.toOCI(ctx, parent)
+		if err != nil {
+			return nil, err
+		}
+		// History check.
+		if areParentAndChild(parentOCI, childOCI) {
+			return parent, nil
+		}
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go
new file mode 100644
index 00000000000..4dfac710654
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/load.go
@@ -0,0 +1,133 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	dirTransport "github.com/containers/image/v5/directory"
+	dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+	ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+	ociTransport "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+type LoadOptions struct {
+	CopyOptions
+}
+
+// Load loads one or more images (depending on the transport) from the
+// specified path. The path may point to an image in one of the following
+// transports: oci, oci-archive, dir, docker-archive.
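+//
+// A minimal usage sketch (assuming a *Runtime r; the archive path is
+// illustrative only, and error handling is elided):
+//
+//	names, err := r.Load(context.Background(), "./alpine.tar", nil)
+//	fmt.Println("loaded:", names)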
+func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ([]string, error) {
+	logrus.Debugf("Loading image from %q", path)
+
+	if r.eventChannel != nil {
+		defer r.writeEvent(&Event{ID: "", Name: path, Time: time.Now(), Type: EventTypeImageLoad})
+	}
+
+	if options == nil {
+		options = &LoadOptions{}
+	}
+
+	var loadErrors []error
+
+	for _, f := range []func() ([]string, string, error){
+		// OCI
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as an OCI directory", path)
+			ref, err := ociTransport.NewReference(path, "")
+			if err != nil {
+				return nil, ociTransport.Transport.Name(), err
+			}
+			images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions)
+			return images, ociTransport.Transport.Name(), err
+		},
+
+		// OCI-ARCHIVE
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as an OCI archive", path)
+			ref, err := ociArchiveTransport.NewReference(path, "")
+			if err != nil {
+				return nil, ociArchiveTransport.Transport.Name(), err
+			}
+			images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions)
+			return images, ociArchiveTransport.Transport.Name(), err
+		},
+
+		// DOCKER-ARCHIVE
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as a Docker archive", path)
+			ref, err := dockerArchiveTransport.ParseReference(path)
+			if err != nil {
+				return nil, dockerArchiveTransport.Transport.Name(), err
+			}
+			images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions)
+			return images, dockerArchiveTransport.Transport.Name(), err
+		},
+
+		// DIR
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as a Docker dir", path)
+			ref, err := dirTransport.NewReference(path)
+			if err != nil {
+				return nil, dirTransport.Transport.Name(), err
+			}
+			images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions)
+			return images, dirTransport.Transport.Name(), err
+		},
+	} {
+		loadedImages, transportName, err := f()
+		if err == nil {
+			return loadedImages, nil
+		}
+		logrus.Debugf("Error loading %s (%s): %v", path, transportName, err)
+		loadErrors = append(loadErrors, fmt.Errorf("%s: %v", transportName, err))
+	}
+
+	// Give a decent error message if nothing above worked.
+	loadError := fmt.Errorf("payload does not match any of the supported image formats:")
+	for _, err := range loadErrors {
+		loadError = fmt.Errorf("%v\n * %v", loadError, err)
+	}
+
+	return nil, loadError
+}
+
+// loadMultiImageDockerArchive loads the docker archive specified by ref. In
+// case the path@reference notation was used, only the specified image will be
+// loaded. Otherwise, all images will be loaded.
+func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
+	// If we cannot stat the path, it either does not exist OR the correct
+	// syntax to reference an image within the archive was used, so we
+	// should fall back to loading from the reference directly.
+ path := ref.StringWithinTransport() + if _, err := os.Stat(path); err != nil { + return r.copyFromDockerArchive(ctx, ref, options) + } + + reader, err := dockerArchiveTransport.NewReader(r.systemContextCopy(), path) + if err != nil { + return nil, err + } + + refLists, err := reader.List() + if err != nil { + return nil, err + } + + var copiedImages []string + for _, list := range refLists { + for _, listRef := range list { + names, err := r.copyFromDockerArchiveReaderReference(ctx, reader, listRef, options) + if err != nil { + return nil, err + } + copiedImages = append(copiedImages, names...) + } + } + + return copiedImages, nil +} diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go new file mode 100644 index 00000000000..4e8959004bf --- /dev/null +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -0,0 +1,401 @@ +package libimage + +import ( + "context" + "fmt" + "time" + + "github.com/containers/common/libimage/manifests" + imageCopy "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// NOTE: the abstractions and APIs here are a first step to further merge +// `libimage/manifests` into `libimage`. + +// ErrNotAManifestList indicates that an image was found in the local +// containers storage but it is not a manifest list as requested. +var ErrNotAManifestList = errors.New("image is not a manifest list") + +// ManifestList represents a manifest list (Docker) or an image index (OCI) in +// the local containers storage. +type ManifestList struct { + // NOTE: the *List* suffix is intentional as the term "manifest" is + // used ambiguously across the ecosystem. It may refer to the (JSON) + // manifest of an ordinary image OR to a manifest *list* (Docker) or to + // image index (OCI). + // It's a bit more work when typing but without ambiguity. + + // The underlying image in the containers storage. + image *Image + + // The underlying manifest list. + list manifests.List +} + +// ID returns the ID of the manifest list. +func (m *ManifestList) ID() string { + return m.image.ID() +} + +// CreateManifestList creates a new empty manifest list with the specified +// name. +func (r *Runtime) CreateManifestList(name string) (*ManifestList, error) { + normalized, err := NormalizeName(name) + if err != nil { + return nil, err + } + + list := manifests.Create() + listID, err := list.SaveToImage(r.store, "", []string{normalized.String()}, manifest.DockerV2ListMediaType) + if err != nil { + return nil, err + } + + mList, err := r.LookupManifestList(listID) + if err != nil { + return nil, err + } + + return mList, nil +} + +// LookupManifestList looks up a manifest list with the specified name in the +// containers storage. 
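+//
+// A minimal usage sketch (assuming a *Runtime r; the list name is
+// illustrative only, and error handling is elided):
+//
+//	list, err := r.LookupManifestList("localhost/mylist:latest")
+//	fmt.Println(list.ID())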
+func (r *Runtime) LookupManifestList(name string) (*ManifestList, error) {
+	image, list, err := r.lookupManifestList(name)
+	if err != nil {
+		return nil, err
+	}
+	return &ManifestList{image: image, list: list}, nil
+}
+
+func (r *Runtime) lookupManifestList(name string) (*Image, manifests.List, error) {
+	lookupOptions := &LookupImageOptions{
+		lookupManifest: true,
+	}
+	image, _, err := r.LookupImage(name, lookupOptions)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := image.reload(); err != nil {
+		return nil, nil, err
+	}
+	list, err := image.getManifestList()
+	if err != nil {
+		return nil, nil, err
+	}
+	return image, list, nil
+}
+
+// ToManifestList converts the image into a manifest list. An error is
+// returned if the image is not a manifest list.
+func (i *Image) ToManifestList() (*ManifestList, error) {
+	list, err := i.getManifestList()
+	if err != nil {
+		return nil, err
+	}
+	return &ManifestList{image: i, list: list}, nil
+}
+
+// LookupInstance looks up an instance of the manifest list matching the
+// specified platform. The local machine's platform is used if left empty.
+func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, variant string) (*Image, error) {
+	sys := m.image.runtime.systemContextCopy()
+	if architecture != "" {
+		sys.ArchitectureChoice = architecture
+	}
+	if os != "" {
+		sys.OSChoice = os
+	}
+	if variant != "" {
+		sys.VariantChoice = variant
+	}
+
+	// Now look at the *manifest* and select a matching instance.
+	rawManifest, manifestType, err := m.image.Manifest(ctx)
+	if err != nil {
+		return nil, err
+	}
+	list, err := manifest.ListFromBlob(rawManifest, manifestType)
+	if err != nil {
+		return nil, err
+	}
+	instanceDigest, err := list.ChooseInstance(sys)
+	if err != nil {
+		return nil, err
+	}
+
+	allImages, err := m.image.runtime.ListImages(ctx, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, image := range allImages {
+		for _, imageDigest := range append(image.Digests(), image.Digest()) {
+			if imageDigest == instanceDigest {
+				return image, nil
+			}
+		}
+	}
+
+	return nil, errors.Wrapf(storage.ErrImageUnknown, "could not find image instance %s of manifest list %s in local containers storage", instanceDigest, m.ID())
+}
+
+// saveAndReload saves the specified manifest list and reloads it from storage
+// with the new ID.
+func (m *ManifestList) saveAndReload() error {
+	newID, err := m.list.SaveToImage(m.image.runtime.store, m.image.ID(), nil, "")
+	if err != nil {
+		return err
+	}
+
+	// Make sure to reload the image from the containers storage to fetch
+	// the latest data (e.g., new or deleted digests).
+	if err := m.image.reload(); err != nil {
+		return err
+	}
+	image, list, err := m.image.runtime.lookupManifestList(newID)
+	if err != nil {
+		return err
+	}
+	m.image = image
+	m.list = list
+	return nil
+}
+
+// getManifestList is a helper to obtain a manifest list
+func (i *Image) getManifestList() (manifests.List, error) {
+	_, list, err := manifests.LoadFromImage(i.runtime.store, i.ID())
+	return list, err
+}
+
+// IsManifestList returns true if the image is a manifest list (Docker) or an
+// image index (OCI). This information may be critical to make certain
+// execution paths more robust (e.g., suppress certain errors).
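+//
+// A minimal usage sketch (assuming an *Image i and a context.Context ctx):
+//
+//	isList, err := i.IsManifestList(ctx)
+//	if err == nil && isList {
+//		// treat i as a manifest list / image index
+//	}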
+func (i *Image) IsManifestList(ctx context.Context) (bool, error) {
+	ref, err := i.StorageReference()
+	if err != nil {
+		return false, err
+	}
+	imgRef, err := ref.NewImageSource(ctx, i.runtime.systemContextCopy())
+	if err != nil {
+		return false, err
+	}
+	_, manifestType, err := imgRef.GetManifest(ctx, nil)
+	if err != nil {
+		return false, err
+	}
+	return manifest.MIMETypeIsMultiImage(manifestType), nil
+}
+
+// Inspect returns a dockerized version of the manifest list.
+func (m *ManifestList) Inspect() (*manifest.Schema2List, error) {
+	return m.list.Docker(), nil
+}
+
+// Options for adding a manifest list.
+type ManifestListAddOptions struct {
+	// Add all images to the list if the to-be-added image itself is a
+	// manifest list.
+	All bool `json:"all"`
+	// containers-auth.json(5) file to use when authenticating against
+	// container registries.
+	AuthFilePath string
+	// Path to the certificates directory.
+	CertDirPath string
+	// Allow contacting registries over HTTP, or HTTPS with failed TLS
+	// verification. Note that this does not affect other TLS connections.
+	InsecureSkipTLSVerify types.OptionalBool
+	// Username to use when authenticating at a container registry.
+	Username string
+	// Password to use when authenticating at a container registry.
+	Password string
+}
+
+// Add adds one or more manifests to the manifest list and returns the digest
+// of the added instance.
+func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestListAddOptions) (digest.Digest, error) {
+	if options == nil {
+		options = &ManifestListAddOptions{}
+	}
+
+	ref, err := alltransports.ParseImageName(name)
+	if err != nil {
+		withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name)
+		ref, err = alltransports.ParseImageName(withDocker)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	// Now massage in the copy-related options into the system context.
+	systemContext := m.image.runtime.systemContextCopy()
+	if options.AuthFilePath != "" {
+		systemContext.AuthFilePath = options.AuthFilePath
+	}
+	if options.CertDirPath != "" {
+		systemContext.DockerCertPath = options.CertDirPath
+	}
+	if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
+		systemContext.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+		systemContext.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+		systemContext.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+	}
+	if options.Username != "" {
+		systemContext.DockerAuthConfig = &types.DockerAuthConfig{
+			Username: options.Username,
+			Password: options.Password,
+		}
+	}
+
+	newDigest, err := m.list.Add(ctx, systemContext, ref, options.All)
+	if err != nil {
+		return "", err
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return "", err
+	}
+	return newDigest, nil
+}
+
+// Options for annotating a manifest list.
+type ManifestListAnnotateOptions struct {
+	// Add the specified annotations to the added image.
+	Annotations map[string]string
+	// Add the specified architecture to the added image.
+	Architecture string
+	// Add the specified features to the added image.
+	Features []string
+	// Add the specified OS to the added image.
+	OS string
+	// Add the specified OS features to the added image.
+	OSFeatures []string
+	// Add the specified OS version to the added image.
+	OSVersion string
+	// Add the specified variant to the added image.
+	Variant string
+}
+
+// AnnotateInstance annotates the image instance specified by `d` in the
+// manifest list.
+func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAnnotateOptions) error {
+	if options == nil {
+		return nil
+	}
+
+	if len(options.OS) > 0 {
+		if err := m.list.SetOS(d, options.OS); err != nil {
+			return err
+		}
+	}
+	if len(options.OSVersion) > 0 {
+		if err := m.list.SetOSVersion(d, options.OSVersion); err != nil {
+			return err
+		}
+	}
+	if len(options.Features) > 0 {
+		if err := m.list.SetFeatures(d, options.Features); err != nil {
+			return err
+		}
+	}
+	if len(options.OSFeatures) > 0 {
+		if err := m.list.SetOSFeatures(d, options.OSFeatures); err != nil {
+			return err
+		}
+	}
+	if len(options.Architecture) > 0 {
+		if err := m.list.SetArchitecture(d, options.Architecture); err != nil {
+			return err
+		}
+	}
+	if len(options.Variant) > 0 {
+		if err := m.list.SetVariant(d, options.Variant); err != nil {
+			return err
+		}
+	}
+	if len(options.Annotations) > 0 {
+		if err := m.list.SetAnnotations(&d, options.Annotations); err != nil {
+			return err
+		}
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// RemoveInstance removes the instance specified by `d` from the manifest list.
+// Returns the new ID of the image.
+func (m *ManifestList) RemoveInstance(d digest.Digest) error {
+	if err := m.list.Remove(d); err != nil {
+		return err
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ManifestListPushOptions allow for customizing pushing a manifest list.
+type ManifestListPushOptions struct {
+	CopyOptions
+
+	// For tweaking the list selection.
+	ImageListSelection imageCopy.ImageListSelection
+	// Use when selecting only specific images.
+	Instances []digest.Digest
+}
+
+// Push pushes a manifest to the specified destination.
+func (m *ManifestList) Push(ctx context.Context, destination string, options *ManifestListPushOptions) (digest.Digest, error) {
+	if options == nil {
+		options = &ManifestListPushOptions{}
+	}
+
+	dest, err := alltransports.ParseImageName(destination)
+	if err != nil {
+		oldErr := err
+		dest, err = alltransports.ParseImageName("docker://" + destination)
+		if err != nil {
+			return "", oldErr
+		}
+	}
+
+	if m.image.runtime.eventChannel != nil {
+		defer m.image.runtime.writeEvent(&Event{ID: m.ID(), Name: destination, Time: time.Now(), Type: EventTypeImagePush})
+	}
+
+	// NOTE: we're using the logic in copier to create a proper
+	// types.SystemContext. This prevents us from having an error-prone
+	// code duplicate here.
+	copier, err := m.image.runtime.newCopier(&options.CopyOptions)
+	if err != nil {
+		return "", err
+	}
+	defer copier.close()
+
+	pushOptions := manifests.PushOptions{
+		Store:              m.image.runtime.store,
+		SystemContext:      copier.systemContext,
+		ImageListSelection: options.ImageListSelection,
+		Instances:          options.Instances,
+		ReportWriter:       options.Writer,
+		SignBy:             options.SignBy,
+		RemoveSignatures:   options.RemoveSignatures,
+		ManifestType:       options.ManifestMIMEType,
+	}
+
+	_, d, err := m.list.Push(ctx, dest, pushOptions)
+	return d, err
+}
diff --git a/vendor/github.com/containers/common/libimage/manifests/copy.go b/vendor/github.com/containers/common/libimage/manifests/copy.go
new file mode 100644
index 00000000000..7e651a46c02
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/manifests/copy.go
@@ -0,0 +1,15 @@
+package manifests
+
+import (
+	"github.com/containers/image/v5/signature"
+)
+
+var (
+	// storageAllowedPolicyScopes overrides the policy for local storage
+	// to ensure that we can read images from it.
+	storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+		"": []signature.PolicyRequirement{
+			signature.NewPRInsecureAcceptAnything(),
+		},
+	}
+)
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
new file mode 100644
index 00000000000..ccff908c943
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -0,0 +1,430 @@
+package manifests
+
+import (
+	"context"
+	"encoding/json"
+	stderrors "errors"
+	"io"
+
+	"github.com/containers/common/pkg/manifests"
+	"github.com/containers/common/pkg/supplemented"
+	cp "github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature"
+	is "github.com/containers/image/v5/storage"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/lockfile"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const instancesData = "instances.json"
+
+// LookupReferenceFunc returns an image reference based on the specified one.
+// The returned reference can return custom ImageSource or ImageDestination
+// objects which intercept or filter blobs, manifests, and signatures as
+// they are read and written.
+type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error)
+
+// ErrListImageUnknown is returned when we attempt to create an image reference
+// for a List that has not yet been saved to an image.
+var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list")
+
+type list struct {
+	manifests.List
+	instances map[digest.Digest]string
+}
+
+// List is a manifest list or image index, either created using Create(), or
+// loaded from local storage using LoadFromImage().
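+//
+// A minimal sketch of the two ways to obtain a List (assuming a
+// storage.Store s and an existing image ID id; error handling elided):
+//
+//	l := Create()                             // new, empty list
+//	imageID, l2, err := LoadFromImage(s, id)  // load a previously saved list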
+type List interface {
+	manifests.List
+	SaveToImage(store storage.Store, imageID string, names []string, mimeType string) (string, error)
+	Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error)
+	Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error)
+	Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error)
+}
+
+// PushOptions includes various settings which are needed for pushing the
+// manifest list and its instances.
+type PushOptions struct {
+	Store              storage.Store
+	SystemContext      *types.SystemContext  // github.com/containers/image/types.SystemContext
+	ImageListSelection cp.ImageListSelection // set to either CopySystemImage, CopyAllImages, or CopySpecificImages
+	Instances          []digest.Digest       // instances to copy if ImageListSelection == CopySpecificImages
+	ReportWriter       io.Writer             // will be used to log the writing of the list and any blobs
+	SignBy             string                // fingerprint of GPG key to use to sign images
+	RemoveSignatures   bool                  // true to discard signatures in images
+	ManifestType       string                // the format to use when saving the list - possible options are oci, v2s1, and v2s2
+	SourceFilter       LookupReferenceFunc   // filter the list source
+}
+
+// Create creates a new, empty list.
+func Create() List {
+	return &list{
+		List:      manifests.Create(),
+		instances: make(map[digest.Digest]string),
+	}
+}
+
+// LoadFromImage reads the manifest list or image index, and additional
+// information about where the various instances that it contains live, from an
+// image record with the specified ID in local storage.
+func LoadFromImage(store storage.Store, image string) (string, List, error) {
+	img, err := store.Image(image)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
+	}
+	manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
+	}
+	manifestList, err := manifests.FromBlob(manifestBytes)
+	if err != nil {
+		return "", nil, err
+	}
+	list := &list{
+		List:      manifestList,
+		instances: make(map[digest.Digest]string),
+	}
+	instancesBytes, err := store.ImageBigData(img.ID, instancesData)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "error locating image %q for loading instance list", image)
+	}
+	if err := json.Unmarshal(instancesBytes, &list.instances); err != nil {
+		return "", nil, errors.Wrapf(err, "error decoding instance list for image %q", image)
+	}
+	list.instances[""] = img.ID
+	return img.ID, list, err
+}
+
+// SaveToImage saves the manifest list or image index as the manifest of an
+// Image record with the specified names in local storage, generating a random
+// image ID if none is specified. It also stores information about where the
+// images whose manifests are included in the list can be found.
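+//
+// A minimal usage sketch (assuming a storage.Store s and a List l; an empty
+// imageID requests a random ID, and an empty mimeType leaves the
+// serialization format up to the list, as saveAndReload above does):
+//
+//	id, err := l.SaveToImage(s, "", []string{"localhost/mylist:latest"}, "")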
+func (l *list) SaveToImage(store storage.Store, imageID string, names []string, mimeType string) (string, error) { + manifestBytes, err := l.List.Serialize(mimeType) + if err != nil { + return "", err + } + instancesBytes, err := json.Marshal(&l.instances) + if err != nil { + return "", err + } + img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{}) + if err == nil || errors.Cause(err) == storage.ErrDuplicateID { + created := (err == nil) + if created { + imageID = img.ID + l.instances[""] = img.ID + } + err := store.SetImageBigData(imageID, storage.ImageDigestManifestBigDataNamePrefix, manifestBytes, manifest.Digest) + if err != nil { + if created { + if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { + logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID) + } + } + return "", errors.Wrapf(err, "saving manifest list to image %q", imageID) + } + err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil) + if err != nil { + if created { + if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { + logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID) + } + } + return "", errors.Wrapf(err, "saving instance list to image %q", imageID) + } + return imageID, nil + } + return "", errors.Wrapf(err, "error creating image to hold manifest list") +} + +// Reference returns an image reference for the composite image being built +// in the list, or an error if the list has never been saved to a local image. +func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { + if l.instances[""] == "" { + return nil, errors.Wrap(ErrListImageUnknown, "error building reference to list") + } + s, err := is.Transport.ParseStoreReference(store, l.instances[""]) + if err != nil { + return nil, errors.Wrapf(err, "error creating ImageReference from image %q", l.instances[""]) + } + references := make([]types.ImageReference, 0, len(l.instances)) + whichInstances := make([]digest.Digest, 0, len(l.instances)) + switch multiple { + case cp.CopyAllImages, cp.CopySystemImage: + for instance := range l.instances { + if instance != "" { + whichInstances = append(whichInstances, instance) + } + } + case cp.CopySpecificImages: + for instance := range l.instances { + for _, allowed := range instances { + if instance == allowed { + whichInstances = append(whichInstances, instance) + } + } + } + } + for _, instance := range whichInstances { + imageName := l.instances[instance] + ref, err := alltransports.ParseImageName(imageName) + if err != nil { + return nil, errors.Wrapf(err, "error creating ImageReference from image %q", imageName) + } + references = append(references, ref) + } + return supplemented.Reference(s, references, multiple, instances), nil +} + +// Push saves the manifest list and whichever blobs are needed to a destination location. +func (l *list) Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) { + // Load the system signing policy. + pushPolicy, err := signature.DefaultPolicy(options.SystemContext) + if err != nil { + return nil, "", errors.Wrapf(err, "error obtaining default signature policy") + } + + // Override the settings for local storage to make sure that we can always read the source "image". 
+ pushPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes + + policyContext, err := signature.NewPolicyContext(pushPolicy) + if err != nil { + return nil, "", errors.Wrapf(err, "error creating new signature policy context") + } + defer func() { + if err2 := policyContext.Destroy(); err2 != nil { + logrus.Errorf("Destroying signature policy context: %v", err2) + } + }() + + // If we were given a media type that corresponds to a multiple-images + // type, reset it to a valid corresponding single-image type, since we + // already expect the image library to infer the list type from the + // image type that we're telling it to force. + singleImageManifestType := options.ManifestType + switch singleImageManifestType { + case v1.MediaTypeImageIndex: + singleImageManifestType = v1.MediaTypeImageManifest + case manifest.DockerV2ListMediaType: + singleImageManifestType = manifest.DockerV2Schema2MediaType + } + + // Build a source reference for our list and grab bag full of blobs. + src, err := l.Reference(options.Store, options.ImageListSelection, options.Instances) + if err != nil { + return nil, "", err + } + if options.SourceFilter != nil { + if src, err = options.SourceFilter(src); err != nil { + return nil, "", err + } + } + copyOptions := &cp.Options{ + ImageListSelection: options.ImageListSelection, + Instances: options.Instances, + SourceCtx: options.SystemContext, + DestinationCtx: options.SystemContext, + ReportWriter: options.ReportWriter, + RemoveSignatures: options.RemoveSignatures, + SignBy: options.SignBy, + ForceManifestMIMEType: singleImageManifestType, + } + + // Copy whatever we were asked to copy. + manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) + if err != nil { + return nil, "", err + } + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, "", err + } + return nil, manifestDigest, nil +} + +// Add adds information about the specified image to the list, computing the +// image's manifest's digest, retrieving OS and architecture information from +// the image's configuration, and recording the image's reference so that it +// can be found at push-time. Returns the instanceDigest for the image. If +// the reference points to an image list, either all instances are added (if +// "all" is true), or the instance which matches "sys" (if "all" is false) will +// be added. 
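+//
+// A minimal usage sketch (assuming a List l, a *types.SystemContext sys, a
+// context.Context ctx, and a parsed types.ImageReference ref; error handling
+// elided):
+//
+//	instanceDigest, err := l.Add(ctx, sys, ref, true) // add all instances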
+func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) { + src, err := ref.NewImageSource(ctx, sys) + if err != nil { + return "", errors.Wrapf(err, "error setting up to read manifest and configuration from %q", transports.ImageName(ref)) + } + defer src.Close() + + type instanceInfo struct { + instanceDigest *digest.Digest + OS, Architecture, OSVersion, Variant string + Features, OSFeatures, Annotations []string + Size int64 + } + var instanceInfos []instanceInfo + var manifestDigest digest.Digest + + primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return "", errors.Wrapf(err, "error reading manifest from %q", transports.ImageName(ref)) + } + + if manifest.MIMETypeIsMultiImage(primaryManifestType) { + lists, err := manifests.FromBlob(primaryManifestBytes) + if err != nil { + return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + } + if all { + for i, instance := range lists.OCIv1().Manifests { + platform := instance.Platform + if platform == nil { + platform = &v1.Platform{} + } + instanceDigest := instance.Digest + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), + OSFeatures: append([]string{}, platform.OSFeatures...), + Size: instance.Size, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + } else { + list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType) + if err != nil { + return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + } + instanceDigest, err := list.ChooseInstance(sys) + if err != nil { + return "", errors.Wrapf(err, "error selecting image from manifest list in %q", transports.ImageName(ref)) + } + added := false + for i, instance := range lists.OCIv1().Manifests { + if instance.Digest != instanceDigest { + continue + } + platform := instance.Platform + if platform == nil { + platform = &v1.Platform{} + } + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), + OSFeatures: append([]string{}, platform.OSFeatures...), + Size: instance.Size, + } + instanceInfos = append(instanceInfos, instanceInfo) + added = true + } + if !added { + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + } + } else { + instanceInfo := instanceInfo{ + instanceDigest: nil, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + + for _, instanceInfo := range instanceInfos { + if instanceInfo.OS == "" || instanceInfo.Architecture == "" { + img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest)) + if err != nil { + return "", errors.Wrapf(err, "error reading configuration blob from %q", transports.ImageName(ref)) + } + config, err := img.OCIConfig(ctx) + if err != nil { + return "", errors.Wrapf(err, "error reading info about config blob from %q", transports.ImageName(ref)) + } + if instanceInfo.OS == "" { + instanceInfo.OS = config.OS + instanceInfo.OSVersion = config.OSVersion + instanceInfo.OSFeatures = 
config.OSFeatures + } + if instanceInfo.Architecture == "" { + instanceInfo.Architecture = config.Architecture + instanceInfo.Variant = config.Variant + } + } + manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) + if err != nil { + return "", errors.Wrapf(err, "error reading manifest from %q, instance %q", transports.ImageName(ref), instanceInfo.instanceDigest) + } + if instanceInfo.instanceDigest == nil { + manifestDigest, err = manifest.Digest(manifestBytes) + if err != nil { + return "", errors.Wrapf(err, "error computing digest of manifest from %q", transports.ImageName(ref)) + } + instanceInfo.instanceDigest = &manifestDigest + instanceInfo.Size = int64(len(manifestBytes)) + } else { + if manifestDigest == "" { + manifestDigest = *instanceInfo.instanceDigest + } + } + err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) + if err != nil { + return "", errors.Wrapf(err, "error adding instance with digest %q", *instanceInfo.instanceDigest) + } + if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok { + l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref) + } + } + + return manifestDigest, nil +} + +// Remove filters out any instances in the list which match the specified digest. +func (l *list) Remove(instanceDigest digest.Digest) error { + err := l.List.Remove(instanceDigest) + if err == nil { + if _, needToDelete := l.instances[instanceDigest]; needToDelete { + delete(l.instances, instanceDigest) + } + } + return err +} + +// LockerForImage returns a Locker for a given image record. It's recommended +// that processes which use LoadFromImage() to load a list from an image and +// then use that list's SaveToImage() method to save a modified version of the +// list to that image record use this lock to avoid accidentally wiping out +// changes that another process is also attempting to make. +func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { + img, err := store.Image(image) + if err != nil { + return nil, errors.Wrapf(err, "locating image %q for locating lock", image) + } + d := digest.NewDigestFromEncoded(digest.Canonical, img.ID) + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image) + } + return store.GetDigestLock(d) +} diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go new file mode 100644 index 00000000000..7ceb6283063 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -0,0 +1,181 @@ +package libimage + +import ( + "runtime" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/containers/image/v5/docker/reference" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NormalizePlatform normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will not be normalized. 
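+//
+// For example (a sketch; the exact output depends on the vendored containerd
+// platform parser):
+//
+//	os, arch, variant := NormalizePlatform("linux", "aarch64", "")
+//	// os == "linux", arch == "arm64", variant == ""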
+func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) {
+	os, arch, variant = rawOS, rawArch, rawVariant
+	if os == "" {
+		os = runtime.GOOS
+	}
+	if arch == "" {
+		arch = runtime.GOARCH
+	}
+	rawPlatform := os + "/" + arch
+	if variant != "" {
+		rawPlatform += "/" + variant
+	}
+
+	normalizedPlatform, err := platforms.Parse(rawPlatform)
+	if err != nil {
+		logrus.Debugf("Error normalizing platform: %v", err)
+		return rawOS, rawArch, rawVariant
+	}
+	logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform)
+	os = rawOS
+	if rawOS != "" {
+		os = normalizedPlatform.OS
+	}
+	arch = rawArch
+	if rawArch != "" {
+		arch = normalizedPlatform.Architecture
+	}
+	variant = rawVariant
+	if rawVariant != "" {
+		variant = normalizedPlatform.Variant
+	}
+	return os, arch, variant
+}
+
+// NormalizeName normalizes the provided name according to the conventions by
+// Podman and Buildah. If tag and digest are missing, the "latest" tag will be
+// used. If it's a short name, it will be prefixed with "localhost/".
+//
+// References to docker.io are normalized according to the Docker conventions.
+// For instance, "docker.io/foo" turns into "docker.io/library/foo".
+func NormalizeName(name string) (reference.Named, error) {
+	// NOTE: this code is in symmetry with containers/image/pkg/shortnames.
+	ref, err := reference.Parse(name)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error normalizing name %q", name)
+	}
+
+	named, ok := ref.(reference.Named)
+	if !ok {
+		return nil, errors.Errorf("%q is not a named reference", name)
+	}
+
+	// Enforce "localhost" if needed.
+	registry := reference.Domain(named)
+	if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
+		name = toLocalImageName(ref.String())
+	}
+
+	// Another parse which also makes sure that docker.io references are
+	// correctly normalized (e.g., docker.io/alpine to
+	// docker.io/library/alpine).
+	named, err = reference.ParseNormalizedNamed(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, hasTag := named.(reference.NamedTagged); hasTag {
+		// Strip off the tag of a tagged and digested reference.
+		named, err = normalizeTaggedDigestedNamed(named)
+		if err != nil {
+			return nil, err
+		}
+		return named, nil
+	}
+	if _, hasDigest := named.(reference.Digested); hasDigest {
+		return named, nil
+	}
+
+	// Make sure to tag "latest".
+	return reference.TagNameOnly(named), nil
+}
+
+// toLocalImageName prefixes the specified name with "localhost/".
+func toLocalImageName(name string) string {
+	return "localhost/" + strings.TrimLeft(name, "/")
+}
+
+// NameTagPair represents a RepoTag of an image.
+type NameTagPair struct {
+	// Name of the RepoTag. May be "".
+	Name string
+	// Tag of the RepoTag. May be "".
+	Tag string
+
+	// for internal use
+	named reference.Named
+}
+
+// ToNameTagPairs splits repoTags into name & tag pairs.
+// Guaranteed to return at least one pair.
+func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
+	none := ""
+
+	var pairs []NameTagPair
+	for i, named := range repoTags {
+		pair := NameTagPair{
+			Name:  named.Name(),
+			Tag:   none,
+			named: repoTags[i],
+		}
+
+		if tagged, isTagged := named.(reference.NamedTagged); isTagged {
+			pair.Tag = tagged.Tag()
+		}
+		pairs = append(pairs, pair)
+	}
+
+	if len(pairs) == 0 {
+		pairs = append(pairs, NameTagPair{Name: none, Tag: none})
+	}
+	return pairs, nil
+}
+
+// normalizeTaggedDigestedString strips the tag off the specified string iff it
+// is tagged and digested.
Note that the tag is entirely ignored to match +// Docker behavior. +func normalizeTaggedDigestedString(s string) (string, error) { + // Note that the input string is not expected to be parseable, so we + // return it verbatim in error cases. + ref, err := reference.Parse(s) + if err != nil { + return "", err + } + named, ok := ref.(reference.Named) + if !ok { + return s, nil + } + named, err = normalizeTaggedDigestedNamed(named) + if err != nil { + return "", err + } + return named.String(), nil +} + +// normalizeTaggedDigestedNamed strips the tag off the specified named +// reference iff it is tagged and digested. Note that the tag is entirely +// ignored to match Docker behavior. +func normalizeTaggedDigestedNamed(named reference.Named) (reference.Named, error) { + _, isTagged := named.(reference.NamedTagged) + if !isTagged { + return named, nil + } + digested, isDigested := named.(reference.Digested) + if !isDigested { + return named, nil + } + + // Now strip off the tag. + newNamed := reference.TrimNamed(named) + // And re-add the digest. + newNamed, err := reference.WithDigest(newNamed, digested.Digest()) + if err != nil { + return named, err + } + logrus.Debugf("Stripped off tag from tagged and digested reference %q", named.String()) + return newNamed, nil +} diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go new file mode 100644 index 00000000000..b88d6613d74 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/oci.go @@ -0,0 +1,97 @@ +package libimage + +import ( + "context" + + ociv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// toOCI returns the image as OCI v1 image. +func (i *Image) toOCI(ctx context.Context) (*ociv1.Image, error) { + if i.cached.ociv1Image != nil { + return i.cached.ociv1Image, nil + } + ref, err := i.StorageReference() + if err != nil { + return nil, err + } + + img, err := ref.NewImage(ctx, i.runtime.systemContextCopy()) + if err != nil { + return nil, err + } + defer img.Close() + + return img.OCIConfig(ctx) +} + +// historiesMatch returns the number of entries in the histories which have the +// same contents +func historiesMatch(a, b []ociv1.History) int { + i := 0 + for i < len(a) && i < len(b) { + if a[i].Created != nil && b[i].Created == nil { + return i + } + if a[i].Created == nil && b[i].Created != nil { + return i + } + if a[i].Created != nil && b[i].Created != nil { + if !a[i].Created.Equal(*(b[i].Created)) { + return i + } + } + if a[i].CreatedBy != b[i].CreatedBy { + return i + } + if a[i].Author != b[i].Author { + return i + } + if a[i].Comment != b[i].Comment { + return i + } + if a[i].EmptyLayer != b[i].EmptyLayer { + return i + } + i++ + } + return i +} + +// areParentAndChild checks diff ID and history in the two images and return +// true if the second should be considered to be directly based on the first +func areParentAndChild(parent, child *ociv1.Image) bool { + // the child and candidate parent should share all of the + // candidate parent's diff IDs, which together would have + // controlled which layers were used + + // Both, child and parent, may be nil when the storage is left in an + // incoherent state. Issue #7444 describes such a case when a build + // has been killed. 
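+	//
+	// Editor's illustration (not upstream code), a minimal sketch of the
+	// parent/child relation this function accepts:
+	//
+	//	parent.RootFS.DiffIDs = [sha256:aaa, sha256:bbb]
+	//	child.RootFS.DiffIDs  = [sha256:aaa, sha256:bbb, sha256:ccc]
+	//	len(child.History) == len(parent.History)+1 // and the shared entries match
+	//	// => areParentAndChild(parent, child) == true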
+	if child == nil || parent == nil {
+		return false
+	}
+
+	if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
+		return false
+	}
+	childUsesCandidateDiffs := true
+	for i := range parent.RootFS.DiffIDs {
+		if child.RootFS.DiffIDs[i] != parent.RootFS.DiffIDs[i] {
+			childUsesCandidateDiffs = false
+			break
+		}
+	}
+	if !childUsesCandidateDiffs {
+		return false
+	}
+	// the child should have the same history as the parent, plus
+	// one more entry
+	if len(parent.History)+1 != len(child.History) {
+		return false
+	}
+	if historiesMatch(parent.History, child.History) != len(parent.History) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
new file mode 100644
index 00000000000..ff93b6ed888
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -0,0 +1,634 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/containers/common/pkg/config"
+	registryTransport "github.com/containers/image/v5/docker"
+	dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+	dockerDaemonTransport "github.com/containers/image/v5/docker/daemon"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+	ociTransport "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/pkg/shortnames"
+	storageTransport "github.com/containers/image/v5/storage"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	digest "github.com/opencontainers/go-digest"
+	ociSpec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PullOptions allows for customizing image pulls.
+type PullOptions struct {
+	CopyOptions
+
+	// If true, all tags of the image will be pulled from the container
+	// registry. Only supported for the docker transport.
+	AllTags bool
+}
+
+// Pull pulls the specified name. Name may refer to any of the supported
+// transports from github.com/containers/image. If no transport is encoded,
+// name will be treated as a reference to a registry (i.e., docker transport).
+//
+// Note that pullPolicy is only used when pulling from a container registry but
+// it *must* be different from the default value `config.PullPolicyUnsupported`. This
+// way, callers are forced to decide on the pull behaviour. The reasoning
+// behind this is that some (commands of some) tools have different default pull
+// policies (e.g., buildah-bud versus podman-build). Making the pull-policy
+// choice explicit is an attempt to prevent silent regressions.
+//
+// The error is storage.ErrImageUnknown iff the pull policy is set to "never"
+// and no local image has been found. This allows for an easier integration
+// into some users of this package (e.g., Buildah).
+func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) ([]*Image, error) {
+	logrus.Debugf("Pulling image %s (policy: %s)", name, pullPolicy)
+
+	if options == nil {
+		options = &PullOptions{}
+	}
+
+	var possiblyUnqualifiedName string // used for short-name resolution
+	ref, err := alltransports.ParseImageName(name)
+	if err != nil {
+		// Check whether `name` points to a transport.
If so, we + // return the error. Otherwise we assume that `name` refers to + // an image on a registry (e.g., "fedora"). + // + // NOTE: the `docker` transport is an exception to support a + // `pull docker:latest` which would otherwise return an error. + if t := alltransports.TransportFromImageName(name); t != nil && t.Name() != registryTransport.Transport.Name() { + return nil, err + } + + // If the image clearly refers to a local one, we can look it up directly. + // In fact, we need to since they are not parseable. + if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) { + if pullPolicy == config.PullPolicyAlways { + return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name) + } + local, _, err := r.LookupImage(name, nil) + if err != nil { + return nil, err + } + return []*Image{local}, err + } + + // Docker compat: strip off the tag iff name is tagged and digested + // (e.g., fedora:latest@sha256...). In that case, the tag is stripped + // off and entirely ignored. The digest is the sole source of truth. + normalizedName, normalizeError := normalizeTaggedDigestedString(name) + if normalizeError != nil { + return nil, normalizeError + } + name = normalizedName + + // If the input does not include a transport assume it refers + // to a registry. + dockerRef, dockerErr := alltransports.ParseImageName("docker://" + name) + if dockerErr != nil { + return nil, err + } + ref = dockerRef + possiblyUnqualifiedName = name + } else if ref.Transport().Name() == registryTransport.Transport.Name() { + // Normalize the input if we're referring to the docker + // transport directly. That makes sure that a `docker://fedora` + // will resolve directly to `docker.io/library/fedora:latest` + // and not be subject to short-name resolution. + named := ref.DockerReference() + if named == nil { + return nil, errors.New("internal error: unexpected nil reference") + } + possiblyUnqualifiedName = named.String() + } + + if options.AllTags && ref.Transport().Name() != registryTransport.Transport.Name() { + return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name()) + } + + if r.eventChannel != nil { + defer r.writeEvent(&Event{ID: "", Name: name, Time: time.Now(), Type: EventTypeImagePull}) + } + + // Some callers may set the platform via the system context at creation + // time of the runtime. We need this information to decide whether we + // need to enforce pulling from a registry (see + // containers/podman/issues/10682). + if options.Architecture == "" { + options.Architecture = r.systemContext.ArchitectureChoice + } + if options.OS == "" { + options.OS = r.systemContext.OSChoice + } + if options.Variant == "" { + options.Variant = r.systemContext.VariantChoice + } + + var ( + pulledImages []string + pullError error + ) + + // Dispatch the copy operation. 
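+	// (Editor's illustration, not upstream code: a name such as
+	// "docker://docker.io/library/alpine:latest" is dispatched to the
+	// registry case below, "docker-archive:/tmp/alpine.tar" to the
+	// docker-archive case, and anything else, e.g. "oci:/tmp/layout",
+	// falls through to the default case.)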
+ switch ref.Transport().Name() { + + // DOCKER REGISTRY + case registryTransport.Transport.Name(): + pulledImages, pullError = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) + + // DOCKER ARCHIVE + case dockerArchiveTransport.Transport.Name(): + pulledImages, pullError = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions) + + // ALL OTHER TRANSPORTS + default: + pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions) + } + + if pullError != nil { + return nil, pullError + } + + localImages := []*Image{} + for _, name := range pulledImages { + local, _, err := r.LookupImage(name, nil) + if err != nil { + return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name) + } + localImages = append(localImages, local) + } + + return localImages, pullError +} + +// nameFromAnnotations returns a reference string to be used as an image name, +// or an empty string. The annotations map may be nil. +func nameFromAnnotations(annotations map[string]string) string { + if annotations == nil { + return "" + } + // buildkit/containerd are using a custom annotation see + // containers/podman/issues/12560. + if annotations["io.containerd.image.name"] != "" { + return annotations["io.containerd.image.name"] + } + return annotations[ociSpec.AnnotationRefName] +} + +// copyFromDefault is the default copier for a number of transports. Other +// transports require some specific dancing, sometimes Yoga. +func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { + c, err := r.newCopier(options) + if err != nil { + return nil, err + } + defer c.close() + + // Figure out a name for the storage destination. + var storageName, imageName string + switch ref.Transport().Name() { + + case dockerDaemonTransport.Transport.Name(): + // Normalize to docker.io if needed (see containers/podman/issues/10998). + named, err := reference.ParseNormalizedNamed(ref.StringWithinTransport()) + if err != nil { + return nil, err + } + imageName = named.String() + storageName = imageName + + case ociTransport.Transport.Name(): + split := strings.SplitN(ref.StringWithinTransport(), ":", 2) + storageName = toLocalImageName(split[0]) + imageName = storageName + + case ociArchiveTransport.Transport.Name(): + manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptor(ref) + if err != nil { + return nil, err + } + storageName = nameFromAnnotations(manifestDescriptor.Annotations) + switch len(storageName) { + case 0: + // If there's no reference name in the annotations, compute an ID. + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] + default: + named, err := NormalizeName(storageName) + if err != nil { + return nil, err + } + imageName = named.String() + storageName = imageName + } + + case storageTransport.Transport.Name(): + storageName = ref.StringWithinTransport() + named := ref.DockerReference() + if named == nil { + return nil, errors.Errorf("could not get an image name for storage reference %q", ref) + } + imageName = named.String() + + default: + // Path-based transports (e.g., dir) may include invalid + // characters, so we should pessimistically generate an ID + // instead of looking at the StringWithinTransport(). + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] + } + + // Create a storage reference. 
+ destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName) + if err != nil { + return nil, errors.Wrapf(err, "parsing %q", storageName) + } + + _, err = c.copy(ctx, ref, destRef) + return []string{imageName}, err +} + +// storageReferencesFromArchiveReader returns a slice of image references inside the +// archive reader. A docker archive may include more than one image and this +// method allows for extracting them into containers storage references which +// can later be used from copying. +func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Context, readerRef types.ImageReference, reader *dockerArchiveTransport.Reader) ([]types.ImageReference, []string, error) { + destNames, err := reader.ManifestTagsForReference(readerRef) + if err != nil { + return nil, nil, err + } + + var imageNames []string + if len(destNames) == 0 { + destName, err := getImageID(ctx, readerRef, &r.systemContext) + if err != nil { + return nil, nil, err + } + destNames = append(destNames, destName) + // Make sure the image can be loaded after the pull by + // replacing the @ with sha256:. + imageNames = append(imageNames, "sha256:"+destName[1:]) + } else { + for i := range destNames { + ref, err := NormalizeName(destNames[i]) + if err != nil { + return nil, nil, err + } + destNames[i] = ref.String() + } + imageNames = destNames + } + + references := []types.ImageReference{} + for _, destName := range destNames { + destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName) + if err != nil { + return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName) + } + references = append(references, destRef) + } + + return references, imageNames, nil +} + +// copyFromDockerArchive copies one image from the specified reference. +func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { + // There may be more than one image inside the docker archive, so we + // need a quick glimpse inside. + reader, readerRef, err := dockerArchiveTransport.NewReaderForReference(&r.systemContext, ref) + if err != nil { + return nil, err + } + + return r.copyFromDockerArchiveReaderReference(ctx, reader, readerRef, options) +} + +// copyFromDockerArchiveReaderReference copies the specified readerRef from reader. +func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, reader *dockerArchiveTransport.Reader, readerRef types.ImageReference, options *CopyOptions) ([]string, error) { + c, err := r.newCopier(options) + if err != nil { + return nil, err + } + defer c.close() + + // Get a slice of storage references we can copy. + references, destNames, err := r.storageReferencesReferencesFromArchiveReader(ctx, readerRef, reader) + if err != nil { + return nil, err + } + + // Now copy all of the images. Use readerRef for performance. + for _, destRef := range references { + if _, err := c.copy(ctx, readerRef, destRef); err != nil { + return nil, err + } + } + + return destNames, nil +} + +// copyFromRegistry pulls the specified, possibly unqualified, name from a +// registry. On successful pull it returns the ID of the image in local +// storage. +// +// If options.All is set, all tags from the specified registry will be pulled. +func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference, inputName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { + // Sanity check. 
+ if err := pullPolicy.Validate(); err != nil { + return nil, err + } + + if !options.AllTags { + return r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options) + } + + // Copy all tags + named := reference.TrimNamed(ref.DockerReference()) + tags, err := registryTransport.GetRepositoryTags(ctx, &r.systemContext, ref) + if err != nil { + return nil, err + } + + pulledIDs := []string{} + for _, tag := range tags { + select { // Let's be gentle with Podman remote. + case <-ctx.Done(): + return nil, errors.Errorf("pulling cancelled") + default: + // We can continue. + } + tagged, err := reference.WithTag(named, tag) + if err != nil { + return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", named.String(), tag) + } + pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options) + if err != nil { + return nil, err + } + pulledIDs = append(pulledIDs, pulled...) + } + + return pulledIDs, nil +} + +// imageIDsForManifest() parses the manifest of the copied image and then looks +// up the IDs of the matching image. There's a small slice of time, between +// when we copy the image into local storage and when we go to look for it +// using the name that we gave it when we copied it, when the name we wanted to +// assign to the image could have been moved, but the image's ID will remain +// the same until it is deleted. +func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemContext) ([]string, error) { + var imageDigest digest.Digest + manifestType := manifest.GuessMIMEType(manifestBytes) + if manifest.MIMETypeIsMultiImage(manifestType) { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing manifest list") + } + d, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "choosing instance from manifest list") + } + imageDigest = d + } else { + d, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "digesting manifest") + } + imageDigest = d + } + var results []string + images, err := r.store.ImagesByDigest(imageDigest) + if err != nil { + return nil, errors.Wrapf(err, "listing images by manifest digest") + } + for _, image := range images { + results = append(results, image.ID) + } + if len(results) == 0 { + return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest") + } + return results, nil +} + +// copySingleImageFromRegistry pulls the specified, possibly unqualified, name +// from a registry. On successful pull it returns the ID of the image in local +// storage. +func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { //nolint:gocyclo + // Sanity check. + if err := pullPolicy.Validate(); err != nil { + return nil, err + } + + var ( + localImage *Image + resolvedImageName string + err error + ) + + // Always check if there's a local image. If so, we should use its + // resolved name for pulling. Assume we're doing a `pull foo`. + // If there's already a local image "localhost/foo", then we should + // attempt pulling that instead of doing the full short-name dance. + // + // NOTE that we only do platform checks if the specified values differ + // from the local platform. Unfortunately, there are many images used + // in the wild which don't set the correct value(s) in the config + // causing various issues such as containers/podman/issues/10682. 
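+	//
+	// Editor's illustration (not upstream code): on a linux/amd64 host, a
+	// caller requesting options.Architecture == "arm64" ends up with
+	// lookupImageOptions.Architecture set (a platform-constrained lookup),
+	// while options.Architecture == "amd64" leaves it unset, so a plain
+	// name-based lookup is performed.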
+	lookupImageOptions := &LookupImageOptions{Variant: options.Variant}
+	if options.Architecture != runtime.GOARCH {
+		lookupImageOptions.Architecture = options.Architecture
+	}
+	if options.OS != runtime.GOOS {
+		lookupImageOptions.OS = options.OS
+	}
+	localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions)
+	if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+		logrus.Errorf("Looking up %s in local storage: %v", imageName, err)
+	}
+
+	// If the local image is corrupted, we need to repull it.
+	if localImage != nil {
+		if err := localImage.isCorrupted(imageName); err != nil {
+			logrus.Error(err)
+			localImage = nil
+		}
+	}
+
+	customPlatform := len(options.Architecture)+len(options.OS)+len(options.Variant) > 0
+	if customPlatform && pullPolicy != config.PullPolicyAlways && pullPolicy != config.PullPolicyNever {
+		// Unless the pull policy is always/never, we must
+		// pessimistically assume that the local image has an invalid
+		// architecture (see containers/podman/issues/10682). Hence,
+		// whenever the user requests a custom platform, set the pull
+		// policy to "newer" to make sure we're pulling down the
+		// correct image.
+		//
+		// NOTE that this will even override --pull={false,never}.
+		pullPolicy = config.PullPolicyNewer
+		logrus.Debugf("Enforcing pull policy to %q to pull custom platform (arch: %q, os: %q, variant: %q) - local image may mistakenly specify wrong platform", pullPolicy, options.Architecture, options.OS, options.Variant)
+	}
+
+	if pullPolicy == config.PullPolicyNever {
+		if localImage != nil {
+			logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName)
+			return []string{resolvedImageName}, nil
+		}
+		logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName)
+		return nil, errors.Wrap(storage.ErrImageUnknown, imageName)
+	}
+
+	if pullPolicy == config.PullPolicyMissing && localImage != nil {
+		return []string{resolvedImageName}, nil
+	}
+
+	// If we looked up the image by ID, we cannot really pull from anywhere.
+	if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) {
+		switch pullPolicy {
+		case config.PullPolicyAlways:
+			return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
+		default:
+			return []string{resolvedImageName}, nil
+		}
+	}
+
+	// If we found a local image, we should use its locally resolved name
+	// (see containers/buildah/issues/2904). An exception is if a custom
+	// platform is specified (e.g., `--arch=arm64`). In that case, we need
+	// to pessimistically pull the image since some images declare wrong
+	// platforms making platform checks absolutely unreliable (see
+	// containers/podman/issues/10682).
+	//
+	// In other words: multi-arch support can only be as good as the images
+	// in the wild, so we shouldn't break things for our users by trying to
+	// insist that they make sense.
+	if localImage != nil && !customPlatform {
+		if imageName != resolvedImageName {
+			logrus.Debugf("Image %s resolved to local image %s which will be used for pulling", imageName, resolvedImageName)
+		}
+		imageName = resolvedImageName
+	}
+
+	sys := r.systemContextCopy()
+	resolved, err := shortnames.Resolve(sys, imageName)
+	if err != nil {
+		// TODO: that is too big of a hammer since we should only
+		// ignore errors that indicate that there's no alias and no
+		// USRs. Must be addressed in c/image first.
+		if localImage != nil && pullPolicy == config.PullPolicyNewer {
+			return []string{resolvedImageName}, nil
+		}
+		return nil, err
+	}
+
+	// NOTE: Below we print the description from the short-name resolution.
+	// In theory we could print it here. In practice, however, this is
+	// causing a hard time for Buildah users who are doing a `buildah from
+	// image` and expect just the container name to be printed if the image
+	// is present locally.
+	// The pragmatic solution is to only print the description when we found
+	// a _newer_ image that we're about to pull.
+	wroteDesc := false
+	writeDesc := func() error {
+		if wroteDesc {
+			return nil
+		}
+		wroteDesc = true
+		if desc := resolved.Description(); len(desc) > 0 {
+			logrus.Debug(desc)
+			if options.Writer != nil {
+				if _, err := options.Writer.Write([]byte(desc + "\n")); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	c, err := r.newCopier(&options.CopyOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer c.close()
+
+	var pullErrors []error
+	for _, candidate := range resolved.PullCandidates {
+		candidateString := candidate.Value.String()
+		logrus.Debugf("Attempting to pull candidate %s for %s", candidateString, imageName)
+		srcRef, err := registryTransport.NewReference(candidate.Value)
+		if err != nil {
+			return nil, err
+		}
+
+		if pullPolicy == config.PullPolicyNewer && localImage != nil {
+			isNewer, err := localImage.hasDifferentDigestWithSystemContext(ctx, srcRef, c.systemContext)
+			if err != nil {
+				pullErrors = append(pullErrors, err)
+				continue
+			}
+
+			if !isNewer {
+				logrus.Debugf("Skipping pull candidate %s as the image is not newer (pull policy %s)", candidateString, pullPolicy)
+				continue
+			}
+		}
+
+		destRef, err := storageTransport.Transport.ParseStoreReference(r.store, candidate.Value.String())
+		if err != nil {
+			return nil, err
+		}
+
+		if err := writeDesc(); err != nil {
+			return nil, err
+		}
+		if options.Writer != nil {
+			if _, err := io.WriteString(options.Writer, fmt.Sprintf("Trying to pull %s...\n", candidateString)); err != nil {
+				return nil, err
+			}
+		}
+		var manifestBytes []byte
+		if manifestBytes, err = c.copy(ctx, srcRef, destRef); err != nil {
+			logrus.Debugf("Error pulling candidate %s: %v", candidateString, err)
+			pullErrors = append(pullErrors, err)
+			continue
+		}
+		if err := candidate.Record(); err != nil {
+			// Only log the recording errors. Podman has seen
+			// reports where users set most of the system to
+			// read-only which can cause issues.
+ logrus.Errorf("Error recording short-name alias %q: %v", candidateString, err) + } + + logrus.Debugf("Pulled candidate %s successfully", candidateString) + if ids, err := r.imagesIDsForManifest(manifestBytes, sys); err == nil { + return ids, nil + } + return []string{candidate.Value.String()}, nil + } + + if localImage != nil && pullPolicy == config.PullPolicyNewer { + return []string{resolvedImageName}, nil + } + + if len(pullErrors) == 0 { + return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy) + } + + return nil, resolved.FormatPullErrors(pullErrors) +} diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go new file mode 100644 index 00000000000..7203838aa61 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/push.go @@ -0,0 +1,89 @@ +package libimage + +import ( + "context" + "time" + + dockerArchiveTransport "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports/alltransports" + "github.com/sirupsen/logrus" +) + +// PushOptions allows for custommizing image pushes. +type PushOptions struct { + CopyOptions +} + +// Push pushes the specified source which must refer to an image in the local +// containers storage. It may or may not have the `containers-storage:` +// prefix. Use destination to push to a custom destination. The destination +// can refer to any supported transport. If not transport is specified, the +// docker transport (i.e., a registry) is implied. If destination is left +// empty, the docker destination will be extrapolated from the source. +// +// Return storage.ErrImageUnknown if source could not be found in the local +// containers storage. +func (r *Runtime) Push(ctx context.Context, source, destination string, options *PushOptions) ([]byte, error) { + if options == nil { + options = &PushOptions{} + } + + // Look up the local image. Note that we need to ignore the platform + // and push what the user specified (containers/podman/issues/10344). + image, resolvedSource, err := r.LookupImage(source, nil) + if err != nil { + return nil, err + } + + srcRef, err := image.StorageReference() + if err != nil { + return nil, err + } + + // Make sure we have a proper destination, and parse it into an image + // reference for copying. + if destination == "" { + // Doing an ID check here is tempting but false positives (due + // to a short partial IDs) are more painful than false + // negatives. + destination = resolvedSource + } + + logrus.Debugf("Pushing image %s to %s", source, destination) + + destRef, err := alltransports.ParseImageName(destination) + if err != nil { + // If the input does not include a transport assume it refers + // to a registry. + dockerRef, dockerErr := alltransports.ParseImageName("docker://" + destination) + if dockerErr != nil { + return nil, err + } + destRef = dockerRef + } + + if r.eventChannel != nil { + defer r.writeEvent(&Event{ID: image.ID(), Name: destination, Time: time.Now(), Type: EventTypeImagePush}) + } + + // Buildah compat: Make sure to tag the destination image if it's a + // Docker archive. This way, we preserve the image name. 
+ if destRef.Transport().Name() == dockerArchiveTransport.Transport.Name() { + if named, err := reference.ParseNamed(resolvedSource); err == nil { + tagged, isTagged := named.(reference.NamedTagged) + if isTagged { + options.dockerArchiveAdditionalTags = []reference.NamedTagged{tagged} + } + } + } + + c, err := r.newCopier(&options.CopyOptions) + if err != nil { + return nil, err + } + + defer c.close() + + return c.copy(ctx, srcRef, destRef) +} diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go new file mode 100644 index 00000000000..86e7eee5638 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -0,0 +1,739 @@ +package libimage + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/shortnames" + storageTransport "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + deepcopy "github.com/jinzhu/copier" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Faster than the standard library, see https://github.com/json-iterator/go. +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// tmpdir returns a path to a temporary directory. +func tmpdir() string { + tmpdir := os.Getenv("TMPDIR") + if tmpdir == "" { + tmpdir = "/var/tmp" + } + + return tmpdir +} + +// RuntimeOptions allow for creating a customized Runtime. +type RuntimeOptions struct { + // The base system context of the runtime which will be used throughout + // the entire lifespan of the Runtime. Certain options in some + // functions may override specific fields. + SystemContext *types.SystemContext +} + +// setRegistriesConfPath sets the registries.conf path for the specified context. +func setRegistriesConfPath(systemContext *types.SystemContext) { + if systemContext.SystemRegistriesConfPath != "" { + return + } + if envOverride, ok := os.LookupEnv("CONTAINERS_REGISTRIES_CONF"); ok { + systemContext.SystemRegistriesConfPath = envOverride + return + } + if envOverride, ok := os.LookupEnv("REGISTRIES_CONFIG_PATH"); ok { + systemContext.SystemRegistriesConfPath = envOverride + return + } +} + +// Runtime is responsible for image management and storing them in a containers +// storage. +type Runtime struct { + // Use to send events out to users. + eventChannel chan *Event + // Underlying storage store. + store storage.Store + // Global system context. No pointer to simplify copying and modifying + // it. + systemContext types.SystemContext +} + +// Returns a copy of the runtime's system context. +func (r *Runtime) SystemContext() *types.SystemContext { + return r.systemContextCopy() +} + +// Returns a copy of the runtime's system context. +func (r *Runtime) systemContextCopy() *types.SystemContext { + var sys types.SystemContext + deepcopy.Copy(&sys, &r.systemContext) + return &sys +} + +// EventChannel creates a buffered channel for events that the Runtime will use +// to write events to. Callers are expected to read from the channel in a +// timely manner. +// Can be called once for a given Runtime. 
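+//
+// Editor's illustration (not part of the upstream file), a minimal usage
+// sketch; draining the channel in a goroutine is the caller's choice, not
+// prescribed API:
+//
+//	events := rt.EventChannel()
+//	go func() {
+//		for ev := range events {
+//			logrus.Debugf("event: %v on %s", ev.Type, ev.Name)
+//		}
+//	}()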
+func (r *Runtime) EventChannel() chan *Event {
+	if r.eventChannel != nil {
+		return r.eventChannel
+	}
+	r.eventChannel = make(chan *Event, 100)
+	return r.eventChannel
+}
+
+// RuntimeFromStore returns a Runtime for the specified store.
+func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, error) {
+	if options == nil {
+		options = &RuntimeOptions{}
+	}
+
+	var systemContext types.SystemContext
+	if options.SystemContext != nil {
+		systemContext = *options.SystemContext
+	} else {
+		systemContext = types.SystemContext{}
+	}
+	if systemContext.BigFilesTemporaryDir == "" {
+		systemContext.BigFilesTemporaryDir = tmpdir()
+	}
+
+	setRegistriesConfPath(&systemContext)
+
+	return &Runtime{
+		store:         store,
+		systemContext: systemContext,
+	}, nil
+}
+
+// RuntimeFromStoreOptions returns a Runtime for the specified store options.
+func RuntimeFromStoreOptions(runtimeOptions *RuntimeOptions, storeOptions *storage.StoreOptions) (*Runtime, error) {
+	if storeOptions == nil {
+		storeOptions = &storage.StoreOptions{}
+	}
+	store, err := storage.GetStore(*storeOptions)
+	if err != nil {
+		return nil, err
+	}
+	storageTransport.Transport.SetStore(store)
+	return RuntimeFromStore(store, runtimeOptions)
+}
+
+// Shutdown attempts to free any kernel resources which are being used by the
+// underlying driver. If "force" is true, any mounted (i.e., in use) layers
+// are unmounted beforehand. If "force" is not true, then layers being in use
+// is considered to be an error condition.
+func (r *Runtime) Shutdown(force bool) error {
+	_, err := r.store.Shutdown(force)
+	if r.eventChannel != nil {
+		close(r.eventChannel)
+	}
+	return err
+}
+
+// storageToImage transforms a storage.Image to an Image.
+func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageReference) *Image {
+	return &Image{
+		runtime:          r,
+		storageImage:     storageImage,
+		storageReference: ref,
+	}
+}
+
+// Exists returns true if the specified image exists in the local containers
+// storage. Note that it may return false if an image is corrupted.
+func (r *Runtime) Exists(name string) (bool, error) {
+	image, _, err := r.LookupImage(name, nil)
+	if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+		return false, err
+	}
+	if image == nil {
+		return false, nil
+	}
+	if err := image.isCorrupted(name); err != nil {
+		logrus.Error(err)
+		return false, nil
+	}
+	return true, nil
+}
+
+// LookupImageOptions allow for customizing local image lookups.
+type LookupImageOptions struct {
+	// Lookup an image matching the specified architecture.
+	Architecture string
+	// Lookup an image matching the specified OS.
+	OS string
+	// Lookup an image matching the specified variant.
+	Variant string
+
+	// If set, do not look for items/instances in the manifest list that
+	// match the current platform but return the manifest list as is.
+	// only check for manifest list, return ErrNotAManifestList if not found.
+	lookupManifest bool
+
+	// If matching images resolves to a manifest list, return manifest list
+	// instead of resolving to image instance, if manifest list is not found
+	// try resolving image.
+	ManifestList bool
+
+	// If the image resolves to a manifest list, we usually lookup a
+	// matching instance and error if none could be found. In this case,
+	// just return the manifest list. Required for image removal.
+	returnManifestIfNoInstance bool
+}
+
+// LookupImage looks up `name` in the local container storage. Returns the
+// image and the name it has been found with.
Note that name may also use the +// `containers-storage:` prefix used to refer to the containers-storage +// transport. Returns storage.ErrImageUnknown if the image could not be found. +// +// Unless specified via the options, the image will be looked up by name only +// without matching the architecture, os or variant. An exception is if the +// image resolves to a manifest list, where an instance of the manifest list +// matching the local or specified platform (via options.{Architecture,OS,Variant}) +// is returned. +// +// If the specified name uses the `containers-storage` transport, the resolved +// name is empty. +func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, string, error) { + logrus.Debugf("Looking up image %q in local containers storage", name) + + if options == nil { + options = &LookupImageOptions{} + } + + // If needed extract the name sans transport. + storageRef, err := alltransports.ParseImageName(name) + if err == nil { + if storageRef.Transport().Name() != storageTransport.Transport.Name() { + return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name()) + } + img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef) + if err != nil { + return nil, "", err + } + logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport()) + return r.storageToImage(img, storageRef), "", nil + } else { + // Docker compat: strip off the tag iff name is tagged and digested + // (e.g., fedora:latest@sha256...). In that case, the tag is stripped + // off and entirely ignored. The digest is the sole source of truth. + normalizedName, err := normalizeTaggedDigestedString(name) + if err != nil { + return nil, "", err + } + name = normalizedName + } + + originalName := name + idByDigest := false + if strings.HasPrefix(name, "sha256:") { + // Strip off the sha256 prefix so it can be parsed later on. + idByDigest = true + name = strings.TrimPrefix(name, "sha256:") + } + + // Unless specified, set the platform specified in the system context + // for later platform matching. Builder likes to set these things via + // the system context at runtime creation. + if options.Architecture == "" { + options.Architecture = r.systemContext.ArchitectureChoice + } + if options.OS == "" { + options.OS = r.systemContext.OSChoice + } + if options.Variant == "" { + options.Variant = r.systemContext.VariantChoice + } + // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). + options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) + + // First, check if we have an exact match in the storage. Maybe an ID + // or a fully-qualified image name. + img, err := r.lookupImageInLocalStorage(name, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, originalName, nil + } + + // If the name clearly referred to a local image, there's nothing we can + // do anymore. + if storageRef != nil || idByDigest { + return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + } + + // Second, try out the candidates as resolved by shortnames. This takes + // "localhost/" prefixed images into account as well. + candidates, err := shortnames.ResolveLocally(&r.systemContext, name) + if err != nil { + return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + } + // Backwards compat: normalize to docker.io as some users may very well + // rely on that. 
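+	//
+	// Editor's illustration (not upstream code): after the append below,
+	// the candidate list for name == "alpine" may look like
+	//
+	//	[localhost/alpine:latest, docker.io/library/alpine:latest]
+	//
+	// and each candidate is tried against the local storage in order.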
+ if dockerNamed, err := reference.ParseDockerRef(name); err == nil { + candidates = append(candidates, dockerNamed) + } + + for _, candidate := range candidates { + img, err := r.lookupImageInLocalStorage(name, candidate.String(), options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, candidate.String(), err + } + } + + return r.lookupImageInDigestsAndRepoTags(originalName, options) +} + +// lookupImageInLocalStorage looks up the specified candidate for name in the +// storage and checks whether it's matching the system context. +func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) { + logrus.Debugf("Trying %q ...", candidate) + img, err := r.store.Image(candidate) + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + return nil, err + } + if img == nil { + return nil, nil + } + ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID) + if err != nil { + return nil, err + } + + image := r.storageToImage(img, ref) + logrus.Debugf("Found image %q as %q in local containers storage", name, candidate) + + // If we referenced a manifest list, we need to check whether we can + // find a matching instance in the local containers storage. + isManifestList, err := image.IsManifestList(context.Background()) + if err != nil { + if errors.Cause(err) == os.ErrNotExist { + // We must be tolerant toward corrupted images. + // See containers/podman commit fd9dd7065d44. + logrus.Warnf("Failed to determine if an image is a manifest list: %v, ignoring the error", err) + return image, nil + } + return nil, err + } + if options.lookupManifest || options.ManifestList { + if isManifestList { + return image, nil + } + // return ErrNotAManifestList if lookupManifest is set otherwise try resolving image. + if options.lookupManifest { + return nil, errors.Wrapf(ErrNotAManifestList, candidate) + } + } + + if isManifestList { + logrus.Debugf("Candidate %q is a manifest list, looking up matching instance", candidate) + manifestList, err := image.ToManifestList() + if err != nil { + return nil, err + } + instance, err := manifestList.LookupInstance(context.Background(), options.Architecture, options.OS, options.Variant) + if err != nil { + if options.returnManifestIfNoInstance { + logrus.Debug("No matching instance was found: returning manifest list instead") + return image, nil + } + return nil, errors.Wrap(storage.ErrImageUnknown, err.Error()) + } + ref, err = storageTransport.Transport.ParseStoreReference(r.store, "@"+instance.ID()) + if err != nil { + return nil, err + } + image = instance + } + + matches, err := r.imageReferenceMatchesContext(ref, options) + if err != nil { + return nil, err + } + + // NOTE: if the user referenced by ID we must optimistically assume + // that they know what they're doing. Given, we already did the + // manifest limbo above, we may already have resolved it. + if !matches && !strings.HasPrefix(image.ID(), candidate) { + return nil, nil + } + // Also print the string within the storage transport. That may aid in + // debugging when using additional stores since we see explicitly where + // the store is and which driver (options) are used. + logrus.Debugf("Found image %q as %q in local containers storage (%s)", name, candidate, ref.StringWithinTransport()) + return image, nil +} + +// lookupImageInDigestsAndRepoTags attempts to match name against any image in +// the local containers storage. If name is digested, it will be compared +// against image digests. 
Otherwise, it will be looked up in the repo tags. +func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupImageOptions) (*Image, string, error) { + // Until now, we've tried very hard to find an image but now it is time + // for limbo. If the image includes a digest that we couldn't detect + // verbatim in the storage, we must have a look at all digests of all + // images. Those may change over time (e.g., via manifest lists). + // Both Podman and Buildah want us to do that dance. + allImages, err := r.ListImages(context.Background(), nil, nil) + if err != nil { + return nil, "", err + } + + ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed + if err != nil { + return nil, "", err + } + named, isNamed := ref.(reference.Named) + if !isNamed { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + + digested, isDigested := named.(reference.Digested) + if isDigested { + logrus.Debug("Looking for image with matching recorded digests") + digest := digested.Digest() + for _, image := range allImages { + for _, d := range image.Digests() { + if d != digest { + continue + } + // Also make sure that the matching image fits all criteria (e.g., manifest list). + if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil { + return nil, "", err + } + return image, name, nil + + } + } + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + + if !shortnames.IsShortName(name) { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + namedTagged, isNammedTagged := named.(reference.NamedTagged) + if !isNammedTagged { + // NOTE: this should never happen since we already know it's + // not a digested reference. + return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown) + } + + for _, image := range allImages { + named, err := image.inRepoTags(namedTagged) + if err != nil { + return nil, "", err + } + if named == nil { + continue + } + img, err := r.lookupImageInLocalStorage(name, named.String(), options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, named.String(), err + } + } + + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) +} + +// ResolveName resolves the specified name. If the name resolves to a local +// image, the fully resolved name will be returned. Otherwise, the name will +// be properly normalized. +// +// Note that an empty string is returned as is. +func (r *Runtime) ResolveName(name string) (string, error) { + if name == "" { + return "", nil + } + image, resolvedName, err := r.LookupImage(name, nil) + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + return "", err + } + + if image != nil && !strings.HasPrefix(image.ID(), resolvedName) { + return resolvedName, err + } + + normalized, err := NormalizeName(name) + if err != nil { + return "", err + } + + return normalized.String(), nil +} + +// imageReferenceMatchesContext return true if the specified reference matches +// the platform (os, arch, variant) as specified by the lookup options. 
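+//
+// Editor's illustration (not part of the upstream file): with empty options
+// the check short-circuits,
+//
+//	matches, _ := r.imageReferenceMatchesContext(ref, &LookupImageOptions{})
+//	// matches == true, no image inspection is performed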
+func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options *LookupImageOptions) (bool, error) { + if options.Architecture+options.OS+options.Variant == "" { + return true, nil + } + + ctx := context.Background() + img, err := ref.NewImage(ctx, &r.systemContext) + if err != nil { + return false, err + } + defer img.Close() + data, err := img.Inspect(ctx) + if err != nil { + return false, err + } + + if options.Architecture != "" && options.Architecture != data.Architecture { + logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref) + return false, nil + } + if options.OS != "" && options.OS != data.Os { + logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref) + return false, nil + } + if options.Variant != "" && options.Variant != data.Variant { + logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref) + return false, nil + } + + return true, nil +} + +// IsExternalContainerFunc allows for checking whether the specified container +// is an external one. The definition of an external container can be set by +// callers. +type IsExternalContainerFunc func(containerID string) (bool, error) + +// ListImagesOptions allow for customizing listing images. +type ListImagesOptions struct { + // Filters to filter the listed images. Supported filters are + // * after,before,since=image + // * containers=true,false,external + // * dangling=true,false + // * intermediate=true,false (useful for pruning images) + // * id=id + // * label=key[=value] + // * readonly=true,false + // * reference=name[:tag] (wildcards allowed) + Filters []string + // IsExternalContainerFunc allows for checking whether the specified + // container is an external one (when containers=external filter is + // used). The definition of an external container can be set by + // callers. + IsExternalContainerFunc IsExternalContainerFunc +} + +// ListImages lists images in the local container storage. If names are +// specified, only images with the specified names are looked up and filtered. +func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListImagesOptions) ([]*Image, error) { + if options == nil { + options = &ListImagesOptions{} + } + + var images []*Image + if len(names) > 0 { + for _, name := range names { + image, _, err := r.LookupImage(name, nil) + if err != nil { + return nil, err + } + images = append(images, image) + } + } else { + storageImages, err := r.store.Images() + if err != nil { + return nil, err + } + for i := range storageImages { + images = append(images, r.storageToImage(&storageImages[i], nil)) + } + } + + return r.filterImages(ctx, images, options) +} + +// RemoveImagesOptions allow for customizing image removal. +type RemoveImagesOptions struct { + // Force will remove all containers from the local storage that are + // using a removed image. Use RemoveContainerFunc for a custom logic. + // If set, all child images will be removed as well. + Force bool + // LookupManifest will expect all specified names to be manifest lists (no instance look up). + // This allows for removing manifest lists. + // By default, RemoveImages will attempt to resolve to a manifest instance matching + // the local platform (i.e., os, architecture, variant). + LookupManifest bool + // RemoveContainerFunc allows for a custom logic for removing + // containers using a specific image. 
By default, all containers in + // the local containers storage will be removed (if Force is set). + RemoveContainerFunc RemoveContainerFunc + // IsExternalContainerFunc allows for checking whether the specified + // container is an external one (when containers=external filter is + // used). The definition of an external container can be set by + // callers. + IsExternalContainerFunc IsExternalContainerFunc + // Remove external containers even when Force is false. Requires + // IsExternalContainerFunc to be specified. + ExternalContainers bool + // Filters to filter the removed images. Supported filters are + // * after,before,since=image + // * containers=true,false,external + // * dangling=true,false + // * intermediate=true,false (useful for pruning images) + // * id=id + // * label=key[=value] + // * readonly=true,false + // * reference=name[:tag] (wildcards allowed) + Filters []string + // The RemoveImagesReport will include the size of the removed image. + // This information may be useful when pruning images to figure out how + // much space was freed. However, computing the size of an image is + // comparatively expensive, so it is made optional. + WithSize bool +} + +// RemoveImages removes images specified by names. If no names are specified, +// remove images as specified via the options' filters. All images are +// expected to exist in the local containers storage. +// +// If an image has more names than one name, the image will be untagged with +// the specified name. RemoveImages returns a slice of untagged and removed +// images. +// +// Note that most errors are non-fatal and collected into `rmErrors` return +// value. +func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *RemoveImagesOptions) (reports []*RemoveImageReport, rmErrors []error) { + if options == nil { + options = &RemoveImagesOptions{} + } + + if options.ExternalContainers && options.IsExternalContainerFunc == nil { + return nil, []error{fmt.Errorf("libimage error: cannot remove external containers without callback")} + } + + // The logic here may require some explanation. Image removal is + // surprisingly complex since it is recursive (intermediate parents are + // removed) and since multiple items in `names` may resolve to the + // *same* image. On top, the data in the containers storage is shared, + // so we need to be careful and the code must be robust. That is why + // users can only remove images via this function; the logic may be + // complex but the execution path is clear. + + // Bundle an image with a possible empty slice of names to untag. That + // allows for a decent untagging logic and to bundle multiple + // references to the same *Image (and circumvent consistency issues). + type deleteMe struct { + image *Image + referencedBy []string + } + + appendError := func(err error) { + rmErrors = append(rmErrors, err) + } + + deleteMap := make(map[string]*deleteMe) // ID -> deleteMe + toDelete := []string{} + // Look up images in the local containers storage and fill out + // toDelete and the deleteMap. + + switch { + case len(names) > 0: + // prepare lookupOptions + var lookupOptions *LookupImageOptions + if options.LookupManifest { + // LookupManifest configured as true make sure we only remove manifests and no referenced images. + lookupOptions = &LookupImageOptions{lookupManifest: true} + } else { + lookupOptions = &LookupImageOptions{returnManifestIfNoInstance: true} + } + // Look up the images one-by-one. 
That allows for removing
+		// images that have been looked up successfully while reporting
+		// lookup errors at the end.
+		for _, name := range names {
+			img, resolvedName, err := r.LookupImage(name, lookupOptions)
+			if err != nil {
+				appendError(err)
+				continue
+			}
+			dm, exists := deleteMap[img.ID()]
+			if !exists {
+				toDelete = append(toDelete, img.ID())
+				dm = &deleteMe{image: img}
+				deleteMap[img.ID()] = dm
+			}
+			dm.referencedBy = append(dm.referencedBy, resolvedName)
+		}
+
+	default:
+		options := &ListImagesOptions{
+			IsExternalContainerFunc: options.IsExternalContainerFunc,
+			Filters:                 options.Filters,
+		}
+		filteredImages, err := r.ListImages(ctx, nil, options)
+		if err != nil {
+			appendError(err)
+			return nil, rmErrors
+		}
+		for _, img := range filteredImages {
+			toDelete = append(toDelete, img.ID())
+			deleteMap[img.ID()] = &deleteMe{image: img}
+		}
+	}
+
+	// Return early if there's no image to delete.
+	if len(deleteMap) == 0 {
+		return nil, rmErrors
+	}
+
+	// Now remove the images in the given order.
+	rmMap := make(map[string]*RemoveImageReport)
+	orderedIDs := []string{}
+	visitedIDs := make(map[string]bool)
+	for _, id := range toDelete {
+		del, exists := deleteMap[id]
+		if !exists {
+			appendError(errors.Errorf("internal error: ID %s not found in image-deletion map", id))
+			continue
+		}
+		if len(del.referencedBy) == 0 {
+			del.referencedBy = []string{""}
+		}
+		for _, ref := range del.referencedBy {
+			processedIDs, err := del.image.remove(ctx, rmMap, ref, options)
+			if err != nil {
+				appendError(err)
+			}
+			// NOTE: make sure to add given ID only once to orderedIDs.
+			for _, id := range processedIDs {
+				if visited := visitedIDs[id]; visited {
+					continue
+				}
+				orderedIDs = append(orderedIDs, id)
+				visitedIDs[id] = true
+			}
+		}
+	}
+
+	// Finally, we can assemble the reports slice.
+	for _, id := range orderedIDs {
+		report, exists := rmMap[id]
+		if exists {
+			reports = append(reports, report)
+		}
+	}
+
+	return reports, rmErrors
+}
diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go
new file mode 100644
index 00000000000..e1b8c3f75ba
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/save.go
@@ -0,0 +1,227 @@
+package libimage
+
+import (
+	"context"
+	"strings"
+	"time"
+
+	dirTransport "github.com/containers/image/v5/directory"
+	dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+	ociTransport "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/types"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// SaveOptions allow for customizing saving images.
+type SaveOptions struct {
+	CopyOptions
+
+	// AdditionalTags for the saved image. Incompatible when saving
+	// multiple images.
+	AdditionalTags []string
+}
+
+// Save saves one or more images indicated by `names` in the specified `format`
+// to `path`. Supported formats are oci-archive, docker-archive, oci-dir and
+// docker-dir. The latter two adhere to the dir transport in the corresponding
+// oci or docker v2s2 format. Please note that only docker-archive supports
+// saving more than one image. Other formats will yield an error attempting
+// to save more than one.
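+//
+// Editor's illustration (not part of the upstream file), a minimal usage
+// sketch; the runtime variable, paths, and image names are assumptions:
+//
+//	// one image, any supported format
+//	err := rt.Save(ctx, []string{"docker.io/library/alpine:latest"}, "oci-archive", "/tmp/alpine-oci.tar", nil)
+//	// several images require docker-archive
+//	err = rt.Save(ctx, []string{"alpine:latest", "busybox:latest"}, "docker-archive", "/tmp/imgs.tar", nil)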
+func (r *Runtime) Save(ctx context.Context, names []string, format, path string, options *SaveOptions) error {
+ logrus.Debugf("Saving one or more images (%s) to %q", names, path)
+
+ if options == nil {
+ options = &SaveOptions{}
+ }
+
+ // First some sanity checks to simplify subsequent code.
+ switch len(names) {
+ case 0:
+ return errors.New("no image specified for saving images")
+ case 1:
+ // All formats support saving 1.
+ default:
+ if format != "docker-archive" {
+ return errors.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
+ }
+ if len(options.AdditionalTags) > 0 {
+ return errors.Errorf("cannot save multiple images with multiple tags")
+ }
+ }
+
+ // Dispatch the save operations.
+ switch format {
+ case "oci-archive", "oci-dir", "docker-dir":
+ if len(names) > 1 {
+ return errors.Errorf("%q does not support saving multiple images (%v)", format, names)
+ }
+ return r.saveSingleImage(ctx, names[0], format, path, options)
+
+ case "docker-archive":
+ options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
+ return r.saveDockerArchive(ctx, names, path, options)
+ }
+
+ return errors.Errorf("unsupported format %q for saving images", format)
+
+}
+
+// saveSingleImage saves the specified image name to the specified path.
+// Supported formats are "oci-archive", "oci-dir" and "docker-dir".
+func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string, options *SaveOptions) error {
+ image, imageName, err := r.LookupImage(name, nil)
+ if err != nil {
+ return err
+ }
+
+ if r.eventChannel != nil {
+ defer r.writeEvent(&Event{ID: image.ID(), Name: path, Time: time.Now(), Type: EventTypeImageSave})
+ }
+
+ // Unless the image was referenced by ID, use the resolved name as a
+ // tag.
+ var tag string
+ if !strings.HasPrefix(image.ID(), imageName) {
+ tag = imageName
+ }
+
+ srcRef, err := image.StorageReference()
+ if err != nil {
+ return err
+ }
+
+ // Prepare the destination reference.
+ var destRef types.ImageReference
+ switch format {
+ case "oci-archive":
+ destRef, err = ociArchiveTransport.NewReference(path, tag)
+
+ case "oci-dir":
+ destRef, err = ociTransport.NewReference(path, tag)
+ options.ManifestMIMEType = ociv1.MediaTypeImageManifest
+
+ case "docker-dir":
+ destRef, err = dirTransport.NewReference(path)
+ options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
+
+ default:
+ return errors.Errorf("unsupported format %q for saving images", format)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ c, err := r.newCopier(&options.CopyOptions)
+ if err != nil {
+ return err
+ }
+ defer c.close()
+
+ _, err = c.copy(ctx, srcRef, destRef)
+ return err
+}
+
+// saveDockerArchive saves the images indicated by names to the path.
+// It loads all images from the local containers storage and assembles the meta
+// data needed to properly save images. Since multiple names could refer to
+// the *same* image, we need to dance a bit and store additional "names".
+// Those can then be used as additional tags when copying.
+func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path string, options *SaveOptions) error {
+ type localImage struct {
+ image *Image
+ tags []reference.NamedTagged
+ }
+
+ additionalTags := []reference.NamedTagged{}
+ for _, tag := range options.AdditionalTags {
+ named, err := NormalizeName(tag)
+ if err == nil {
+ tagged, withTag := named.(reference.NamedTagged)
+ if !withTag {
+ return errors.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
+ }
+ additionalTags = append(additionalTags, tagged)
+ }
+ }
+
+ orderedIDs := []string{} // to preserve the relative order
+ localImages := make(map[string]*localImage) // to assemble tags
+ visitedNames := make(map[string]bool) // filters duplicate names
+ for _, name := range names {
+ // Look up local images.
+ image, imageName, err := r.LookupImage(name, nil)
+ if err != nil {
+ return err
+ }
+ // Make sure to filter duplicates purely based on the resolved
+ // name.
+ if _, exists := visitedNames[imageName]; exists {
+ continue
+ }
+ visitedNames[imageName] = true
+ // Extract and assemble the data.
+ local, exists := localImages[image.ID()]
+ if !exists {
+ local = &localImage{image: image}
+ local.tags = additionalTags
+ orderedIDs = append(orderedIDs, image.ID())
+ }
+ // Add the tag if the locally resolved name is properly tagged
+ // (which it should be unless we looked it up by ID).
+ named, err := reference.ParseNamed(imageName)
+ if err == nil {
+ tagged, withTag := named.(reference.NamedTagged)
+ if withTag {
+ local.tags = append(local.tags, tagged)
+ }
+ }
+ localImages[image.ID()] = local
+ if r.eventChannel != nil {
+ defer r.writeEvent(&Event{ID: image.ID(), Name: path, Time: time.Now(), Type: EventTypeImageSave})
+ }
+ }
+
+ writer, err := dockerArchiveTransport.NewWriter(r.systemContextCopy(), path)
+ if err != nil {
+ return err
+ }
+ defer writer.Close()
+
+ for _, id := range orderedIDs {
+ local, exists := localImages[id]
+ if !exists {
+ return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
+ }
+
+ copyOpts := options.CopyOptions
+ copyOpts.dockerArchiveAdditionalTags = local.tags
+
+ c, err := r.newCopier(&copyOpts)
+ if err != nil {
+ return err
+ }
+ defer c.close()
+
+ destRef, err := writer.NewReference(nil)
+ if err != nil {
+ return err
+ }
+
+ srcRef, err := local.image.StorageReference()
+ if err != nil {
+ return err
+ }
+
+ if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
new file mode 100644
index 00000000000..33a4776ce3f
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -0,0 +1,324 @@
+package libimage
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ registryTransport "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/hashicorp/go-multierror"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ searchTruncLength = 44
+ searchMaxQueries = 25
+ // Let's follow Firefox by limiting parallel downloads to 6. We do the
+ // same when pulling images in c/image.
+ searchMaxParallel = int64(6)
+)
+
+// SearchResult holds image-search related data.
+type SearchResult struct {
+ // Index is the image index (e.g., "docker.io" or "quay.io")
+ Index string
+ // Name is the canonical name of the image (e.g., "docker.io/library/alpine").
+ Name string
+ // Description of the image.
+ Description string
+ // Stars is the number of stars of the image.
+ Stars int
+ // Official indicates if it's an official image.
+ Official string
+ // Automated indicates if the image was created by an automated build.
+ Automated string
+ // Tag is the image tag.
+ Tag string
+}
+
+// SearchOptions customize searching images.
+type SearchOptions struct {
+ // Filter allows filtering the results.
+ Filter SearchFilter
+ // Limit limits the number of queries per index (default: 25). Must be
+ // greater than 0 to override the default value.
+ Limit int
+ // NoTrunc prevents the output from being truncated.
+ NoTrunc bool
+ // Authfile is the path to the authentication file.
+ Authfile string
+ // InsecureSkipTLSVerify allows skipping TLS verification.
+ InsecureSkipTLSVerify types.OptionalBool
+ // ListTags returns the search result with available tags.
+ ListTags bool
+ // Registries to search if the specified term does not include a
+ // registry. If set, the unqualified-search registries in
+ // containers-registries.conf(5) are ignored.
+ Registries []string
+}
+
+// SearchFilter allows filtering images while searching.
+type SearchFilter struct {
+ // Stars describes the minimal number of stars of an image.
+ Stars int
+ // IsAutomated decides if only images from automated builds are displayed.
+ IsAutomated types.OptionalBool
+ // IsOfficial decides if only official images are displayed.
+ IsOfficial types.OptionalBool
+}
+
+// ParseSearchFilter turns the filter into a SearchFilter that can be used for
+// searching images.
+func ParseSearchFilter(filter []string) (*SearchFilter, error) {
+ sFilter := new(SearchFilter)
+ for _, f := range filter {
+ arr := strings.SplitN(f, "=", 2)
+ switch arr[0] {
+ case "stars":
+ if len(arr) < 2 {
+ return nil, errors.Errorf("invalid `stars` filter %q, should be stars=", filter)
+ }
+ stars, err := strconv.Atoi(arr[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+ }
+ sFilter.Stars = stars
+ case "is-automated":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsAutomated = types.OptionalBoolFalse
+ } else {
+ sFilter.IsAutomated = types.OptionalBoolTrue
+ }
+ case "is-official":
+ if len(arr) == 2 && arr[1] == "false" {
+ sFilter.IsOfficial = types.OptionalBoolFalse
+ } else {
+ sFilter.IsOfficial = types.OptionalBoolTrue
+ }
+ default:
+ return nil, errors.Errorf("invalid filter type %q", f)
+ }
+ }
+ return sFilter, nil
+}
+
+// Search searches for term. If term includes a registry, only this registry will
+// be used for searching. Otherwise, the unqualified-search registries in
+// containers-registries.conf(5) or the ones specified in the options will be
+// used.
+func (r *Runtime) Search(ctx context.Context, term string, options *SearchOptions) ([]SearchResult, error) {
+ if options == nil {
+ options = &SearchOptions{}
+ }
+
+ var searchRegistries []string
+
+ // Try to extract a registry from the specified search term. We
+ // consider everything before the first slash to be the registry. Note
+ // that we cannot use the reference parser from the containers/image
+ // library as the search term may contain arbitrary input such as
+ // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629.
+ spl := strings.SplitN(term, "/", 2)
+ switch {
+ case len(spl) > 1:
+ searchRegistries = []string{spl[0]}
+ term = spl[1]
+ case len(options.Registries) > 0:
+ searchRegistries = options.Registries
+ default:
+ regs, err := sysregistriesv2.UnqualifiedSearchRegistries(r.systemContextCopy())
+ if err != nil {
+ return nil, err
+ }
+ searchRegistries = regs
+ }
+
+ logrus.Debugf("Searching images matching term %s at the following registries %s", term, searchRegistries)
+
+ // searchOutputData is used as a return value for searching in parallel.
+ type searchOutputData struct {
+ data []SearchResult
+ err error
+ }
+
+ sem := semaphore.NewWeighted(searchMaxParallel)
+ wg := sync.WaitGroup{}
+ wg.Add(len(searchRegistries))
+ data := make([]searchOutputData, len(searchRegistries))
+
+ for i := range searchRegistries {
+ if err := sem.Acquire(ctx, 1); err != nil {
+ return nil, err
+ }
+ index := i
+ go func() {
+ defer sem.Release(1)
+ defer wg.Done()
+ searchOutput, err := r.searchImageInRegistry(ctx, term, searchRegistries[index], options)
+ data[index] = searchOutputData{data: searchOutput, err: err}
+ }()
+ }
+
+ wg.Wait()
+ results := []SearchResult{}
+ var multiErr error
+ for _, d := range data {
+ if d.err != nil {
+ multiErr = multierror.Append(multiErr, d.err)
+ continue
+ }
+ results = append(results, d.data...)
+ }
+
+ // Optimistically assume that one successfully searched registry
+ // includes what the user is looking for.
+ if len(results) > 0 {
+ return results, nil
+ }
+ return results, multiErr
+}
+
+func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry string, options *SearchOptions) ([]SearchResult, error) {
+ // Max number of queries by default is 25
+ limit := searchMaxQueries
+ if options.Limit > 0 {
+ limit = options.Limit
+ }
+
+ sys := r.systemContextCopy()
+ if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+ }
+
+ if options.Authfile != "" {
+ sys.AuthFilePath = options.Authfile
+ }
+
+ if options.ListTags {
+ results, err := searchRepositoryTags(ctx, sys, registry, term, options)
+ if err != nil {
+ return []SearchResult{}, err
+ }
+ return results, nil
+ }
+
+ results, err := registryTransport.SearchRegistry(ctx, sys, registry, term, limit)
+ if err != nil {
+ return []SearchResult{}, err
+ }
+ index := registry
+ arr := strings.Split(registry, ".")
+ if len(arr) > 2 {
+ index = strings.Join(arr[len(arr)-2:], ".")
+ }
+
+ // limit is the number of results to output;
+ // if the total number of results is less than the limit, output all of them;
+ // if the user has set a limit, output at most that many results
+ limit = searchMaxQueries
+ if len(results) < limit {
+ limit = len(results)
+ }
+ if options.Limit != 0 {
+ limit = len(results)
+ if options.Limit < len(results) {
+ limit = options.Limit
+ }
+ }
+
+ paramsArr := []SearchResult{}
+ for i := 0; i < limit; i++ {
+ // Check whether the result matches the filters
+ if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) {
+ continue
+ }
+ official := ""
+ if results[i].IsOfficial {
+ official = "[OK]"
+ }
+ automated := ""
+ if results[i].IsAutomated {
+ automated = "[OK]"
+ }
+ description := strings.ReplaceAll(results[i].Description, "\n", " ")
+ if len(description) > 44 && !options.NoTrunc {
+ description = description[:searchTruncLength] + "..."
+ } + name := registry + "/" + results[i].Name + if index == "docker.io" && !strings.Contains(results[i].Name, "/") { + name = index + "/library/" + results[i].Name + } + params := SearchResult{ + Index: registry, + Name: name, + Description: description, + Official: official, + Automated: automated, + Stars: results[i].StarCount, + } + paramsArr = append(paramsArr, params) + } + return paramsArr, nil +} + +func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registry, term string, options *SearchOptions) ([]SearchResult, error) { + dockerPrefix := "docker://" + imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term)) + if err == nil && imageRef.Transport().Name() != registryTransport.Transport.Name() { + return nil, errors.Errorf("reference %q must be a docker reference", term) + } else if err != nil { + imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term))) + if err != nil { + return nil, errors.Errorf("reference %q must be a docker reference", term) + } + } + tags, err := registryTransport.GetRepositoryTags(ctx, sys, imageRef) + if err != nil { + return nil, errors.Errorf("error getting repository tags: %v", err) + } + limit := searchMaxQueries + if len(tags) < limit { + limit = len(tags) + } + if options.Limit != 0 { + limit = len(tags) + if options.Limit < limit { + limit = options.Limit + } + } + paramsArr := []SearchResult{} + for i := 0; i < limit; i++ { + params := SearchResult{ + Name: imageRef.DockerReference().Name(), + Tag: tags[i], + Index: registry, + } + paramsArr = append(paramsArr, params) + } + return paramsArr, nil +} + +func (f *SearchFilter) matchesStarFilter(result registryTransport.SearchResult) bool { + return result.StarCount >= f.Stars +} + +func (f *SearchFilter) matchesAutomatedFilter(result registryTransport.SearchResult) bool { + if f.IsAutomated != types.OptionalBoolUndefined { + return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue) + } + return true +} + +func (f *SearchFilter) matchesOfficialFilter(result registryTransport.SearchResult) bool { + if f.IsOfficial != types.OptionalBoolUndefined { + return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue) + } + return true +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/README.md b/vendor/github.com/containers/common/libnetwork/cni/README.md new file mode 100644 index 00000000000..6f57feff51b --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/README.md @@ -0,0 +1,10 @@ +This package abstracts CNI from libpod. +It implements the `ContainerNetwork` interface defined in [libpod/network/types/network.go](../types/network.go) for the CNI backend. + + +## Testing +Run the tests with: +``` +go test -v -mod=vendor -cover ./libpod/network/cni/ +``` +Run the tests as root to also test setup/teardown. This will execute CNI and therefore the cni plugins have to be installed. 
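The search API vendored above is easiest to see end-to-end from a caller's perspective. The following is an editor's sketch, not part of the vendored code: `searchOfficialImages` and `rt` are hypothetical names, and the snippet assumes a `*libimage.Runtime` constructed elsewhere; it only exercises `ParseSearchFilter` and `Runtime.Search` as defined in search.go above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// searchOfficialImages is a hypothetical caller of the vendored search API.
// It assumes rt was constructed elsewhere; building the runtime is out of
// scope for search.go.
func searchOfficialImages(ctx context.Context, rt *libimage.Runtime, term string) error {
	// Only official images with at least 10 stars.
	filter, err := libimage.ParseSearchFilter([]string{"stars=10", "is-official"})
	if err != nil {
		return err
	}
	results, err := rt.Search(ctx, term, &libimage.SearchOptions{
		Filter: *filter,
		Limit:  5, // at most 5 results per registry
	})
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Printf("%s stars=%d %s\n", r.Name, r.Stars, r.Official)
	}
	return nil
}
```

Note that, per the implementation above, Search returns partial results with a nil error as long as at least one registry responded; the accumulated multierror only surfaces when every registry failed.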
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile b/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile new file mode 100644 index 00000000000..d302f441c76 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile @@ -0,0 +1,483 @@ +mode: count +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:25.110,36.16 4 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:39.2,39.37 1 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:47.2,48.16 2 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:51.2,57.34 5 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:109.2,111.22 2 174 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:36.16,38.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:39.37,40.51 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:40.51,44.4 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:48.16,50.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:58.33,61.17 3 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:64.3,67.19 2 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:72.3,72.22 1 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:75.3,75.23 1 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:79.3,80.17 2 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:84.34,87.17 3 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:90.3,93.23 2 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:97.3,98.17 2 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:102.10,105.39 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:61.17,63.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:67.19,69.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:72.22,74.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:75.23,77.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:80.17,82.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:87.17,89.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:93.23,95.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:98.17,100.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:114.74,115.33 1 174 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:120.2,120.14 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:115.33,116.34 1 678 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:116.34,118.4 1 96 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:125.95,126.45 1 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:131.2,131.50 1 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:135.2,136.32 2 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:194.2,194.12 1 136 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:126.45,129.3 2 38 
+github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:131.50,133.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:136.32,137.26 1 156 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:137.26,143.18 3 156 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:146.4,150.26 3 156 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:167.4,171.29 4 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:177.4,177.27 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:183.4,183.44 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:188.4,188.32 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:191.4,191.48 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:143.18,145.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:150.26,152.23 2 118 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:156.5,157.20 2 117 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:152.23,154.6 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:157.20,159.6 1 97 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:160.10,160.32 1 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:160.32,163.19 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:163.19,165.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:171.29,173.26 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:173.26,175.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:177.27,179.24 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:179.24,181.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:183.44,187.5 3 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:188.32,190.5 1 20 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:198.96,199.35 1 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:210.2,210.12 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:199.35,200.54 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:200.54,202.29 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:207.4,207.17 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:202.29,203.32 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:203.32,205.6 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:216.138,223.30 2 87 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:238.2,240.36 3 87 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:259.2,261.22 3 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:266.2,269.24 3 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:289.2,291.16 3 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:294.2,295.17 2 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:310.2,311.16 2 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:314.2,314.33 1 82 
+github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:223.30,224.42 1 84 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:233.3,233.54 1 84 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:224.42,226.18 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:229.4,231.68 3 91 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:226.18,228.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:234.8,236.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:240.36,241.12 1 8 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:242.14,244.18 2 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:248.15,250.18 2 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:254.11,255.69 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:244.18,246.5 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:250.18,252.5 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:261.22,264.3 2 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:270.33,274.62 3 78 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:279.3,279.18 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:283.34,284.87 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:286.10,287.85 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:274.62,277.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:279.18,281.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:291.16,293.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:295.17,298.17 3 34 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:301.3,302.17 2 34 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:305.3,306.75 2 34 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:298.17,300.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:302.17,304.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:307.8,309.3 1 48 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:311.16,313.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:318.40,319.15 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:322.2,323.16 2 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:326.2,326.11 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:329.2,329.15 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:319.15,321.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:323.16,325.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:326.11,328.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:333.42,334.16 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:337.2,338.16 2 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:341.2,341.23 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:344.2,344.15 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:334.16,336.3 1 0 
+github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:338.16,340.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:341.23,343.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:347.90,349.29 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:377.2,377.22 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:349.29,350.26 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:353.3,355.38 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:350.26,352.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:355.38,356.72 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:359.4,366.41 3 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:356.72,358.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:366.41,374.5 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:47.41,49.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:54.2,54.21 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:57.2,57.12 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:49.17,51.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:51.8,51.23 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:51.23,53.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:54.21,56.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:61.122,71.16 9 688 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:74.2,74.28 1 688 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:71.16,73.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:78.88,83.22 3 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:92.2,92.14 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:83.22,84.23 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:84.23,86.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:86.9,88.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:89.8,89.63 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:89.63,91.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:96.77,98.2 1 688 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:123.80,128.21 5 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:131.2,131.22 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:134.2,134.19 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:137.2,137.10 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:128.21,130.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:131.22,133.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:134.19,136.3 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:141.119,155.54 4 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:158.2,158.16 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:155.54,157.3 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:162.97,170.2 3 84 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:173.113,179.23 2 91 
+github.com/containers/podman/v3/libpod/network/cni/cni_types.go:188.2,188.15 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:191.2,191.18 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:179.23,180.32 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:183.3,183.30 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:180.32,182.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:183.30,185.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:188.15,190.3 1 89 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:196.43,198.2 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:202.58,204.12 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:207.2,208.16 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:211.2,211.29 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:204.12,206.3 1 6 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:208.16,210.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:216.39,224.2 4 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:227.41,231.2 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:234.37,238.2 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:242.56,250.2 3 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:253.44,254.26 1 79 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:259.2,259.14 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:254.26,255.65 1 79 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:255.65,257.4 1 79 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:263.78,268.13 2 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:273.2,273.21 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:276.2,279.50 3 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:282.2,282.10 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:268.13,270.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:273.21,275.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:279.50,281.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:285.51,292.2 3 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:20.78,24.16 4 55 +github.com/containers/podman/v3/libpod/network/cni/config.go:27.2,28.16 2 55 +github.com/containers/podman/v3/libpod/network/cni/config.go:32.2,33.32 2 34 +github.com/containers/podman/v3/libpod/network/cni/config.go:24.16,26.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:28.16,30.3 1 21 +github.com/containers/podman/v3/libpod/network/cni/config.go:38.97,40.29 1 103 +github.com/containers/podman/v3/libpod/network/cni/config.go:46.2,46.25 1 103 +github.com/containers/podman/v3/libpod/network/cni/config.go:50.2,50.30 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:53.2,53.31 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:56.2,56.35 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:60.2,63.27 3 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:85.2,86.17 2 99 +github.com/containers/podman/v3/libpod/network/cni/config.go:93.2,93.27 1 99 
+github.com/containers/podman/v3/libpod/network/cni/config.go:112.2,112.36 1 93 +github.com/containers/podman/v3/libpod/network/cni/config.go:123.2,126.87 2 87 +github.com/containers/podman/v3/libpod/network/cni/config.go:131.2,132.16 2 87 +github.com/containers/podman/v3/libpod/network/cni/config.go:135.2,135.79 1 82 +github.com/containers/podman/v3/libpod/network/cni/config.go:40.29,42.3 1 27 +github.com/containers/podman/v3/libpod/network/cni/config.go:46.25,48.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:50.30,52.3 1 101 +github.com/containers/podman/v3/libpod/network/cni/config.go:53.31,55.3 1 94 +github.com/containers/podman/v3/libpod/network/cni/config.go:56.35,58.3 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:63.27,64.53 1 56 +github.com/containers/podman/v3/libpod/network/cni/config.go:67.3,67.47 1 54 +github.com/containers/podman/v3/libpod/network/cni/config.go:64.53,66.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:67.47,69.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:70.8,72.17 2 46 +github.com/containers/podman/v3/libpod/network/cni/config.go:75.3,75.25 1 46 +github.com/containers/podman/v3/libpod/network/cni/config.go:72.17,74.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:86.17,88.17 2 51 +github.com/containers/podman/v3/libpod/network/cni/config.go:88.17,90.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:94.33,96.54 1 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:99.3,100.17 2 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:103.34,105.17 2 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:108.10,109.93 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:96.54,98.4 1 34 +github.com/containers/podman/v3/libpod/network/cni/config.go:100.17,102.4 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:105.17,107.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:112.36,114.17 2 97 +github.com/containers/podman/v3/libpod/network/cni/config.go:117.3,117.51 1 91 +github.com/containers/podman/v3/libpod/network/cni/config.go:114.17,116.4 1 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:117.51,119.4 1 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:126.87,129.3 2 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:132.16,134.3 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:140.59,144.16 4 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:148.2,149.16 2 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:154.2,154.48 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:159.2,159.59 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:170.2,173.24 3 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:144.16,146.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:149.16,151.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:154.48,156.3 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:159.59,161.17 2 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:161.17,164.18 2 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:164.18,166.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:179.88,183.16 4 15 
+github.com/containers/podman/v3/libpod/network/cni/config.go:187.2,189.33 2 15 +github.com/containers/podman/v3/libpod/network/cni/config.go:198.2,198.22 1 15 +github.com/containers/podman/v3/libpod/network/cni/config.go:183.16,185.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:189.33,190.34 1 120 +github.com/containers/podman/v3/libpod/network/cni/config.go:196.3,196.46 1 52 +github.com/containers/podman/v3/libpod/network/cni/config.go:190.34,192.31 1 99 +github.com/containers/podman/v3/libpod/network/cni/config.go:192.31,193.19 1 68 +github.com/containers/podman/v3/libpod/network/cni/config.go:202.77,206.16 4 18 +github.com/containers/podman/v3/libpod/network/cni/config.go:210.2,211.16 2 18 +github.com/containers/podman/v3/libpod/network/cni/config.go:214.2,214.32 1 16 +github.com/containers/podman/v3/libpod/network/cni/config.go:206.16,208.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:211.16,213.3 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:217.50,218.22 1 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:221.2,221.36 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:230.2,230.31 1 4 +github.com/containers/podman/v3/libpod/network/cni/config.go:235.2,235.12 1 4 +github.com/containers/podman/v3/libpod/network/cni/config.go:218.22,220.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:221.36,223.17 2 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:226.3,226.71 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:223.17,225.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:226.71,228.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:230.31,232.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:232.8,234.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:238.92,239.36 1 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:255.2,255.31 1 89 +github.com/containers/podman/v3/libpod/network/cni/config.go:264.2,264.25 1 89 +github.com/containers/podman/v3/libpod/network/cni/config.go:290.2,291.12 2 89 +github.com/containers/podman/v3/libpod/network/cni/config.go:239.36,241.63 2 87 +github.com/containers/podman/v3/libpod/network/cni/config.go:244.3,244.62 1 85 +github.com/containers/podman/v3/libpod/network/cni/config.go:241.63,243.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:244.62,246.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:247.8,250.17 3 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:250.17,252.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:255.31,257.17 2 24 +github.com/containers/podman/v3/libpod/network/cni/config.go:260.3,260.57 1 24 +github.com/containers/podman/v3/libpod/network/cni/config.go:257.17,259.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:264.25,267.42 3 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:275.3,275.12 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:282.3,282.12 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:267.42,268.37 1 7 +github.com/containers/podman/v3/libpod/network/cni/config.go:271.4,271.37 1 7 +github.com/containers/podman/v3/libpod/network/cni/config.go:268.37,270.5 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:271.37,273.5 1 5 
+github.com/containers/podman/v3/libpod/network/cni/config.go:275.12,277.18 2 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:280.4,280.58 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:277.18,279.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:282.12,284.18 2 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:287.4,287.58 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:284.18,286.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:298.88,299.14 1 97 +github.com/containers/podman/v3/libpod/network/cni/config.go:302.2,302.24 1 97 +github.com/containers/podman/v3/libpod/network/cni/config.go:309.2,310.16 2 96 +github.com/containers/podman/v3/libpod/network/cni/config.go:315.2,315.59 1 96 +github.com/containers/podman/v3/libpod/network/cni/config.go:319.2,320.22 2 94 +github.com/containers/podman/v3/libpod/network/cni/config.go:331.2,331.25 1 93 +github.com/containers/podman/v3/libpod/network/cni/config.go:339.2,339.12 1 91 +github.com/containers/podman/v3/libpod/network/cni/config.go:299.14,301.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:302.24,304.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:310.16,312.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:315.59,317.3 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:320.22,321.36 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:321.36,323.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:324.8,324.23 1 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:324.23,326.17 2 90 +github.com/containers/podman/v3/libpod/network/cni/config.go:329.3,329.17 1 90 +github.com/containers/podman/v3/libpod/network/cni/config.go:326.17,328.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:331.25,332.78 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:335.3,335.74 1 4 +github.com/containers/podman/v3/libpod/network/cni/config.go:332.78,334.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:335.74,337.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:74.78,77.16 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:81.2,82.30 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:86.2,87.25 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:90.2,91.16 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:95.2,106.15 3 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:77.16,79.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:82.30,84.3 1 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:87.25,89.3 1 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:91.16,93.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:109.43,111.23 1 93 +github.com/containers/podman/v3/libpod/network/cni/network.go:115.2,116.16 2 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:119.2,120.29 2 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:160.2,160.39 1 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:168.2,170.12 3 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:111.23,113.3 1 26 +github.com/containers/podman/v3/libpod/network/cni/network.go:116.16,118.3 1 
0 +github.com/containers/podman/v3/libpod/network/cni/network.go:120.29,122.17 2 180 +github.com/containers/podman/v3/libpod/network/cni/network.go:130.3,130.47 1 177 +github.com/containers/podman/v3/libpod/network/cni/network.go:135.3,135.86 1 176 +github.com/containers/podman/v3/libpod/network/cni/network.go:140.3,140.41 1 176 +github.com/containers/podman/v3/libpod/network/cni/network.go:145.3,146.17 2 175 +github.com/containers/podman/v3/libpod/network/cni/network.go:150.3,156.36 3 174 +github.com/containers/podman/v3/libpod/network/cni/network.go:122.17,124.27 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:127.4,127.12 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:124.27,126.5 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:130.47,132.12 2 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:135.86,137.12 2 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:140.41,142.12 2 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:146.17,148.12 2 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:160.39,162.17 2 48 +github.com/containers/podman/v3/libpod/network/cni/network.go:165.3,165.43 1 48 +github.com/containers/podman/v3/libpod/network/cni/network.go:162.17,164.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:173.63,183.2 2 48 +github.com/containers/podman/v3/libpod/network/cni/network.go:190.68,192.41 1 23 +github.com/containers/podman/v3/libpod/network/cni/network.go:196.2,197.33 2 5 +github.com/containers/podman/v3/libpod/network/cni/network.go:210.2,210.16 1 4 +github.com/containers/podman/v3/libpod/network/cni/network.go:213.2,213.106 1 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:192.41,194.3 1 18 +github.com/containers/podman/v3/libpod/network/cni/network.go:197.33,199.37 1 9 +github.com/containers/podman/v3/libpod/network/cni/network.go:203.3,203.52 1 9 +github.com/containers/podman/v3/libpod/network/cni/network.go:199.37,201.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:203.52,204.18 1 5 +github.com/containers/podman/v3/libpod/network/cni/network.go:207.4,207.13 1 4 +github.com/containers/podman/v3/libpod/network/cni/network.go:204.18,206.5 1 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:210.16,212.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:218.47,221.2 2 262 +github.com/containers/podman/v3/libpod/network/cni/network.go:224.97,233.6 2 25 +github.com/containers/podman/v3/libpod/network/cni/network.go:233.6,234.103 1 27 +github.com/containers/podman/v3/libpod/network/cni/network.go:240.3,242.17 3 2 +github.com/containers/podman/v3/libpod/network/cni/network.go:234.103,239.4 2 25 +github.com/containers/podman/v3/libpod/network/cni/network.go:242.17,244.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:249.97,251.29 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:264.2,264.60 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:251.29,254.17 2 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:257.3,257.104 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:254.17,256.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:257.104,262.4 2 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:269.61,272.33 2 51 
+github.com/containers/podman/v3/libpod/network/cni/network.go:278.2,279.16 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:282.2,282.45 1 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:272.33,273.40 1 64 +github.com/containers/podman/v3/libpod/network/cni/network.go:273.40,275.4 1 65 +github.com/containers/podman/v3/libpod/network/cni/network.go:279.16,281.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:287.58,291.16 4 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:294.2,300.31 5 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:307.2,307.78 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:291.16,293.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:300.31,302.48 2 57 +github.com/containers/podman/v3/libpod/network/cni/network.go:302.48,305.4 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:312.53,314.33 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:317.2,317.14 1 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:314.33,316.3 1 64 +github.com/containers/podman/v3/libpod/network/cni/network.go:322.57,324.33 2 138 +github.com/containers/podman/v3/libpod/network/cni/network.go:329.2,329.14 1 138 +github.com/containers/podman/v3/libpod/network/cni/network.go:324.33,325.56 1 115 +github.com/containers/podman/v3/libpod/network/cni/network.go:325.56,327.4 1 111 +github.com/containers/podman/v3/libpod/network/cni/run.go:25.116,29.16 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:33.2,33.25 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:36.2,36.31 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:39.2,39.32 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:42.2,42.46 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:54.2,54.63 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:61.2,61.16 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:65.2,69.15 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:80.2,81.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:85.2,86.46 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:123.2,123.21 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:29.16,31.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:33.25,35.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:36.31,38.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:39.32,41.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:42.46,44.21 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:47.3,48.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:44.21,46.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:48.17,50.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:54.63,56.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:59.3,59.13 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:56.17,58.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:61.16,63.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:69.15,70.20 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:70.20,71.38 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:71.38,73.19 2 0 
+github.com/containers/podman/v3/libpod/network/cni/run.go:73.19,75.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:81.16,83.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:86.46,92.91 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:101.3,106.20 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:110.3,112.20 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:115.3,118.20 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:121.3,121.25 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:92.91,96.21 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:96.21,98.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:106.20,108.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:112.20,114.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:118.20,120.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:128.78,131.55 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:138.2,142.35 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:172.2,173.20 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:131.55,133.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:136.3,136.40 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:133.16,135.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:142.35,143.26 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:147.3,147.49 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:150.3,152.9 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:143.26,145.12 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:147.49,149.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:152.9,158.4 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:158.9,160.18 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:163.4,169.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:160.18,162.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:177.86,178.33 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:181.1,182.39 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:190.2,190.63 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:193.2,193.12 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:178.33,180.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:182.39,183.47 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:188.3,188.118 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:183.47,184.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:184.29,185.19 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:190.63,192.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:196.141,213.68 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:220.2,220.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:224.2,224.30 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:235.2,235.27 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:242.2,242.20 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:246.2,246.11 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:213.68,214.66 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:214.66,216.4 1 
0 +github.com/containers/podman/v3/libpod/network/cni/run.go:220.29,222.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:224.30,228.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:228.8,228.36 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:228.36,232.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:235.27,239.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:242.20,244.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:250.90,254.16 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:257.2,257.43 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:254.16,256.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:260.90,265.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:269.2,270.46 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:291.2,291.30 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:265.16,267.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:270.46,274.17 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:286.3,287.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:274.17,276.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:276.9,279.22 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:283.4,283.32 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:279.22,281.13 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:287.17,289.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:294.149,299.16 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:305.2,306.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:309.2,309.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:299.16,301.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:301.8,301.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:301.29,303.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:306.16,308.3 1 0 diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go new file mode 100644 index 00000000000..5574b2b1c5b --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -0,0 +1,397 @@ +// +build linux + +package cni + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/containernetworking/cni/libcni" + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) { + network := types.Network{ + Name: conf.Name, + ID: getNetworkIDFromName(conf.Name), + Labels: map[string]string{}, + Options: map[string]string{}, + IPAMOptions: map[string]string{}, + } + + cniJSON := make(map[string]interface{}) + err := json.Unmarshal(conf.Bytes, &cniJSON) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal network config %s", conf.Name) + } + if args, ok := cniJSON["args"]; ok { + if key, ok := args.(map[string]interface{}); ok { 
+			// read network labels and options from the conf file
+			network.Labels = getNetworkArgsFromConfList(key, podmanLabelKey)
+			network.Options = getNetworkArgsFromConfList(key, podmanOptionsKey)
+		}
+	}
+
+	f, err := os.Stat(confPath)
+	if err != nil {
+		return nil, err
+	}
+	stat := f.Sys().(*syscall.Stat_t)
+	network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
+
+	firstPlugin := conf.Plugins[0]
+	network.Driver = firstPlugin.Network.Type
+
+	switch firstPlugin.Network.Type {
+	case types.BridgeNetworkDriver:
+		var bridge hostLocalBridge
+		err := json.Unmarshal(firstPlugin.Bytes, &bridge)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to unmarshal the bridge plugin config in %s", confPath)
+		}
+		network.NetworkInterface = bridge.BrName
+
+		// if isGateway is false we have an internal network
+		if !bridge.IsGW {
+			network.Internal = true
+		}
+
+		// set network options
+		if bridge.MTU != 0 {
+			network.Options["mtu"] = strconv.Itoa(bridge.MTU)
+		}
+		if bridge.Vlan != 0 {
+			network.Options["vlan"] = strconv.Itoa(bridge.Vlan)
+		}
+
+		err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath)
+		if err != nil {
+			return nil, err
+		}
+
+	case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver:
+		var vlan VLANConfig
+		err := json.Unmarshal(firstPlugin.Bytes, &vlan)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to unmarshal the macvlan plugin config in %s", confPath)
+		}
+		network.NetworkInterface = vlan.Master
+
+		// set network options
+		if vlan.MTU != 0 {
+			network.Options["mtu"] = strconv.Itoa(vlan.MTU)
+		}
+
+		if vlan.Mode != "" {
+			network.Options["mode"] = vlan.Mode
+		}
+
+		err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath)
+		if err != nil {
+			return nil, err
+		}
+
+	default:
+		// A warning would be good but users would get this warning every time so keep this at info level.
+		logrus.Infof("Unsupported CNI config type %s in %s, this network can still be used but inspect or list cannot show all information",
+			firstPlugin.Network.Type, confPath)
+	}
+
+	// check if the dnsname plugin is configured
+	network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname")
+
+	return &network, nil
+}
+
+func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool {
+	for _, plugin := range plugins {
+		if plugin.Network.Type == name {
+			return true
+		}
+	}
+	return false
+}
+
+// convertIPAMConfToNetwork converts a CNI IPAMConfig into libpod network subnets,
+// setting the subnets and the IPAM driver on the given network.
+func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath string) error {
+	if ipam.PluginType == types.DHCPIPAMDriver {
+		network.IPAMOptions["driver"] = types.DHCPIPAMDriver
+		return nil
+	}
+
+	if ipam.PluginType != types.HostLocalIPAMDriver {
+		return errors.Errorf("unsupported ipam plugin %s in %s", ipam.PluginType, confPath)
+	}
+
+	network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+	for _, r := range ipam.Ranges {
+		for _, ipam := range r {
+			s := types.Subnet{}
+
+			// Do not use types.ParseCIDR() because we want the ip to be
+			// the network address and not a random ip in the subnet.
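+			// For example, net.ParseCIDR("10.89.0.3/24") yields the masked subnet 10.89.0.0/24.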
+ _, sub, err := net.ParseCIDR(ipam.Subnet) + if err != nil { + return err + } + s.Subnet = types.IPNet{IPNet: *sub} + + // gateway + var gateway net.IP + if ipam.Gateway != "" { + gateway = net.ParseIP(ipam.Gateway) + if gateway == nil { + return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway) + } + // convert to 4 byte if ipv4 + util.NormalizeIP(&gateway) + } else if !network.Internal { + // only add a gateway address if the network is not internal + gateway, err = util.FirstIPInSubnet(sub) + if err != nil { + return errors.Errorf("failed to get first ip in subnet %s", sub.String()) + } + } + s.Gateway = gateway + + var rangeStart net.IP + var rangeEnd net.IP + if ipam.RangeStart != "" { + rangeStart = net.ParseIP(ipam.RangeStart) + if rangeStart == nil { + return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart) + } + } + if ipam.RangeEnd != "" { + rangeEnd = net.ParseIP(ipam.RangeEnd) + if rangeEnd == nil { + return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd) + } + } + if rangeStart != nil || rangeEnd != nil { + s.LeaseRange = &types.LeaseRange{} + s.LeaseRange.StartIP = rangeStart + s.LeaseRange.EndIP = rangeEnd + } + if util.IsIPv6(s.Subnet.IP) { + network.IPv6Enabled = true + } + network.Subnets = append(network.Subnets, s) + } + } + return nil +} + +// getNetworkArgsFromConfList returns the map of args in a conflist, argType should be labels or options +func getNetworkArgsFromConfList(args map[string]interface{}, argType string) map[string]string { + if args, ok := args[argType]; ok { + if labels, ok := args.(map[string]interface{}); ok { + result := make(map[string]string, len(labels)) + for k, v := range labels { + if v, ok := v.(string); ok { + result[k] = v + } + } + return result + } + } + return map[string]string{} +} + +// createCNIConfigListFromNetwork will create a cni config file from the given network. +// It returns the cni config and the path to the file where the config was written. +// Set writeToDisk to false to only add this network into memory. 
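+// For a bridge network the generated conflist has roughly this shape
+// (illustrative sketch; exact fields depend on the network options):
+//
+//	{
+//	  "cniVersion": "0.4.0",
+//	  "name": "mynet",
+//	  "plugins": [
+//	    {"type": "bridge", ...}, {"type": "portmap", ...},
+//	    {"type": "firewall", ...}, {"type": "tuning", ...}
+//	  ]
+//	}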
+func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writeToDisk bool) (*libcni.NetworkConfigList, string, error) { + var ( + routes []ipamRoute + ipamRanges [][]ipamLocalHostRangeConf + ipamConf ipamConfig + err error + ) + if len(network.Subnets) > 0 { + defIpv4Route := false + defIpv6Route := false + for _, subnet := range network.Subnets { + ipam := newIPAMLocalHostRange(subnet.Subnet, subnet.LeaseRange, subnet.Gateway) + ipamRanges = append(ipamRanges, []ipamLocalHostRangeConf{*ipam}) + + // only add default route for not internal networks + if !network.Internal { + ipv6 := util.IsIPv6(subnet.Subnet.IP) + if !ipv6 && defIpv4Route { + continue + } + if ipv6 && defIpv6Route { + continue + } + + if ipv6 { + defIpv6Route = true + } else { + defIpv4Route = true + } + route, err := newIPAMDefaultRoute(ipv6) + if err != nil { + return nil, "", err + } + routes = append(routes, route) + } + } + ipamConf = newIPAMHostLocalConf(routes, ipamRanges) + } else { + ipamConf = ipamConfig{PluginType: "dhcp"} + } + + vlan := 0 + mtu := 0 + vlanPluginMode := "" + for k, v := range network.Options { + switch k { + case "mtu": + mtu, err = internalutil.ParseMTU(v) + if err != nil { + return nil, "", err + } + + case "vlan": + vlan, err = internalutil.ParseVlan(v) + if err != nil { + return nil, "", err + } + + case "mode": + switch network.Driver { + case types.MacVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) { + return nil, "", errors.Errorf("unknown macvlan mode %q", v) + } + case types.IPVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) { + return nil, "", errors.Errorf("unknown ipvlan mode %q", v) + } + default: + return nil, "", errors.Errorf("cannot set option \"mode\" with driver %q", network.Driver) + } + vlanPluginMode = v + + default: + return nil, "", errors.Errorf("unsupported network option %s", k) + } + } + + isGateway := true + ipMasq := true + if network.Internal { + isGateway = false + ipMasq = false + } + // create CNI plugin configuration + // explicitly use CNI version 0.4.0 here, to use v1.0.0 at least containernetwork-plugins-1.0.1 has to be installed + // the dnsname plugin also needs to be updated for 1.0.0 + // TODO change to 1.0.0 when most distros support it + ncList := newNcList(network.Name, "0.4.0", network.Labels, network.Options) + var plugins []interface{} + + switch network.Driver { + case types.BridgeNetworkDriver: + bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, mtu, vlan, &ipamConf) + plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin()) + // if we find the dnsname plugin we add configuration for it + if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled { + // Note: in the future we might like to allow for dynamic domain names + plugins = append(plugins, newDNSNamePlugin(defaultPodmanDomainName)) + } + + case types.MacVLANNetworkDriver: + plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, &ipamConf)) + + case types.IPVLANNetworkDriver: + plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, &ipamConf)) + + default: + return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver) + } + ncList["plugins"] = plugins + b, err := json.MarshalIndent(ncList, "", " ") + if err != nil { + return nil, "", err + } + cniPathName := "" + if writeToDisk { + cniPathName = 
filepath.Join(n.cniConfigDir, network.Name+".conflist") + err = ioutil.WriteFile(cniPathName, b, 0644) + if err != nil { + return nil, "", err + } + f, err := os.Stat(cniPathName) + if err != nil { + return nil, "", err + } + stat := f.Sys().(*syscall.Stat_t) + network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)) + } else { + network.Created = time.Now() + } + config, err := libcni.ConfListFromBytes(b) + if err != nil { + return nil, "", err + } + return config, cniPathName, nil +} + +func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry, error) { + cniPorts := make([]cniPortMapEntry, 0, len(ports)) + for _, port := range ports { + if port.Protocol == "" { + return nil, errors.New("port protocol should not be empty") + } + protocols := strings.Split(port.Protocol, ",") + + for _, protocol := range protocols { + if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) { + return nil, errors.Errorf("unknown port protocol %s", protocol) + } + cniPort := cniPortMapEntry{ + HostPort: int(port.HostPort), + ContainerPort: int(port.ContainerPort), + HostIP: port.HostIP, + Protocol: protocol, + } + cniPorts = append(cniPorts, cniPort) + for i := 1; i < int(port.Range); i++ { + cniPort := cniPortMapEntry{ + HostPort: int(port.HostPort) + i, + ContainerPort: int(port.ContainerPort) + i, + HostIP: port.HostIP, + Protocol: protocol, + } + cniPorts = append(cniPorts, cniPort) + } + } + } + return cniPorts, nil +} + +func removeMachinePlugin(conf *libcni.NetworkConfigList) *libcni.NetworkConfigList { + plugins := make([]*libcni.NetworkConfig, 0, len(conf.Plugins)) + for _, net := range conf.Plugins { + if net.Network.Type != "podman-machine" { + plugins = append(plugins, net) + } + } + conf.Plugins = plugins + return conf +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go new file mode 100644 index 00000000000..c66e7ef5d7a --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go @@ -0,0 +1,110 @@ +// Copyright 2016 CNI authors +// Copyright 2021 Podman authors +// +// This code has been originally copied from github.com/containernetworking/cni +// but has been changed to better fit the Podman use case. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package cni + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/version" + "github.com/containers/storage/pkg/unshare" +) + +type cniExec struct { + version.PluginDecoder +} + +type cniPluginError struct { + plugin string + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +// Error returns a nicely formatted error message for the cni plugin errors. 
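+// For example (illustrative): "cni plugin bridge failed: failed to allocate IP: no IP addresses available".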
+func (e *cniPluginError) Error() string {
+	err := fmt.Sprintf("cni plugin %s failed", e.plugin)
+	if e.Msg != "" {
+		err = fmt.Sprintf("%s: %s", err, e.Msg)
+	} else if e.Code > 0 {
+		err = fmt.Sprintf("%s with error code %d", err, e.Code)
+	}
+	if e.Details != "" {
+		err = fmt.Sprintf("%s: %s", err, e.Details)
+	}
+	return err
+}
+
+// ExecPlugin executes the cni plugin. It returns the stdout of the plugin or an error.
+func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+	stdout := &bytes.Buffer{}
+	stderr := &bytes.Buffer{}
+	c := exec.CommandContext(ctx, pluginPath)
+	c.Env = environ
+	c.Stdin = bytes.NewBuffer(stdinData)
+	c.Stdout = stdout
+	c.Stderr = stderr
+
+	// The dnsname plugin tries to use XDG_RUNTIME_DIR to store files.
+	// podman run will have XDG_RUNTIME_DIR set and thus the cni plugin can use
+	// it. The problem is that XDG_RUNTIME_DIR is unset for the conmon process
+	// for rootful users. This causes issues since the cleanup process is spawned
+	// by conmon and thus does not have XDG_RUNTIME_DIR set to the same value as
+	// podman run. Because of this, dnsname cannot find its config files and
+	// cannot clean up correctly. To fix this we also unset XDG_RUNTIME_DIR for
+	// the cni plugins when running as rootful.
+	if !unshare.IsRootless() {
+		c.Env = append(c.Env, "XDG_RUNTIME_DIR=")
+	}
+
+	err := c.Run()
+	if err != nil {
+		return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes())
+	}
+	return stdout.Bytes(), nil
+}
+
+// annotatePluginError parses the common cni plugin error json.
+func annotatePluginError(err error, plugin string, stdout, stderr []byte) error {
+	pluginName := filepath.Base(plugin)
+	emsg := cniPluginError{
+		plugin: pluginName,
+	}
+	if len(stdout) == 0 {
+		if len(stderr) == 0 {
+			emsg.Msg = err.Error()
+		} else {
+			emsg.Msg = string(stderr)
+		}
+	} else if perr := json.Unmarshal(stdout, &emsg); perr != nil {
+		emsg.Msg = fmt.Sprintf("failed to unmarshal error message %q: %v", string(stdout), perr)
+	}
+	return &emsg
+}
+
+// FindInPath finds the plugin in the given paths.
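+// For example (illustrative), FindInPath("bridge", []string{"/usr/libexec/cni"})
+// returns "/usr/libexec/cni/bridge" when the binary exists there.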
+func (e *cniExec) FindInPath(plugin string, paths []string) (string, error) { + return invoke.FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go new file mode 100644 index 00000000000..fbfcd49ad37 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -0,0 +1,281 @@ +// +build linux + +package cni + +import ( + "net" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/types" +) + +const ( + defaultIPv4Route = "0.0.0.0/0" + defaultIPv6Route = "::/0" + // defaultPodmanDomainName is used for the dnsname plugin to define + // a localized domain name for a created network + defaultPodmanDomainName = "dns.podman" + + // cniDeviceName is the default name for a new bridge, it should be suffixed with an integer + cniDeviceName = "cni-podman" + + // podmanLabelKey key used to store the podman network label in a cni config + podmanLabelKey = "podman_labels" + + // podmanOptionsKey key used to store the podman network options in a cni config + podmanOptionsKey = "podman_options" +) + +// cniPortMapEntry struct is used by the portmap plugin +// https://github.com/containernetworking/plugins/blob/649e0181fe7b3a61e708f3e4249a798f57f25cc5/plugins/meta/portmap/main.go#L43-L50 +type cniPortMapEntry struct { + HostPort int `json:"hostPort"` + ContainerPort int `json:"containerPort"` + Protocol string `json:"protocol"` + HostIP string `json:"hostIP,omitempty"` +} + +// hostLocalBridge describes a configuration for a bridge plugin +// https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge#network-configuration-reference +type hostLocalBridge struct { + PluginType string `json:"type"` + BrName string `json:"bridge,omitempty"` + IsGW bool `json:"isGateway"` + IsDefaultGW bool `json:"isDefaultGateway,omitempty"` + ForceAddress bool `json:"forceAddress,omitempty"` + IPMasq bool `json:"ipMasq,omitempty"` + MTU int `json:"mtu,omitempty"` + HairpinMode bool `json:"hairpinMode,omitempty"` + PromiscMode bool `json:"promiscMode,omitempty"` + Vlan int `json:"vlan,omitempty"` + IPAM ipamConfig `json:"ipam"` + Capabilities map[string]bool `json:"capabilities,omitempty"` +} + +// ipamConfig describes an IPAM configuration +// https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local#network-configuration-reference +type ipamConfig struct { + PluginType string `json:"type"` + Routes []ipamRoute `json:"routes,omitempty"` + ResolveConf string `json:"resolveConf,omitempty"` + DataDir string `json:"dataDir,omitempty"` + Ranges [][]ipamLocalHostRangeConf `json:"ranges,omitempty"` +} + +// ipamLocalHostRangeConf describes the new style IPAM ranges +type ipamLocalHostRangeConf struct { + Subnet string `json:"subnet"` + RangeStart string `json:"rangeStart,omitempty"` + RangeEnd string `json:"rangeEnd,omitempty"` + Gateway string `json:"gateway,omitempty"` +} + +// ipamRoute describes a route in an ipam config +type ipamRoute struct { + Dest string `json:"dst"` +} + +// portMapConfig describes the default portmapping config +type portMapConfig struct { + PluginType string `json:"type"` + Capabilities map[string]bool `json:"capabilities"` +} + +// VLANConfig describes the macvlan config +type VLANConfig struct { + PluginType string `json:"type"` + Master string `json:"master"` + IPAM ipamConfig `json:"ipam"` + MTU int `json:"mtu,omitempty"` + Mode string `json:"mode,omitempty"` + Capabilities 
map[string]bool `json:"capabilities,omitempty"` +} + +// firewallConfig describes the firewall plugin +type firewallConfig struct { + PluginType string `json:"type"` + Backend string `json:"backend"` +} + +// tuningConfig describes the tuning plugin +type tuningConfig struct { + PluginType string `json:"type"` +} + +// dnsNameConfig describes the dns container name resolution plugin config +type dnsNameConfig struct { + PluginType string `json:"type"` + DomainName string `json:"domainName"` + Capabilities map[string]bool `json:"capabilities"` +} + +// ncList describes a generic map +type ncList map[string]interface{} + +// newNcList creates a generic map of values with string +// keys and adds in version and network name +func newNcList(name, version string, labels, options map[string]string) ncList { + n := ncList{} + n["cniVersion"] = version + n["name"] = name + args := map[string]map[string]string{} + if len(labels) > 0 { + args[podmanLabelKey] = labels + } + if len(options) > 0 { + args[podmanOptionsKey] = options + } + if len(args) > 0 { + n["args"] = args + } + return n +} + +// newHostLocalBridge creates a new LocalBridge for host-local +func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipamConf *ipamConfig) *hostLocalBridge { + caps := make(map[string]bool) + caps["ips"] = true + bridge := hostLocalBridge{ + PluginType: "bridge", + BrName: name, + IsGW: isGateWay, + IPMasq: ipMasq, + MTU: mtu, + HairpinMode: true, + Vlan: vlan, + IPAM: *ipamConf, + } + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipamConf.PluginType == types.HostLocalIPAMDriver { + bridge.Capabilities = caps + } + return &bridge +} + +// newIPAMHostLocalConf creates a new IPAMHostLocal configuration +func newIPAMHostLocalConf(routes []ipamRoute, ipamRanges [][]ipamLocalHostRangeConf) ipamConfig { + ipamConf := ipamConfig{ + PluginType: "host-local", + Routes: routes, + } + + ipamConf.Ranges = ipamRanges + return ipamConf +} + +// newIPAMLocalHostRange create a new IPAM range +func newIPAMLocalHostRange(subnet types.IPNet, leaseRange *types.LeaseRange, gw net.IP) *ipamLocalHostRangeConf { + hostRange := &ipamLocalHostRangeConf{ + Subnet: subnet.String(), + } + + // a user provided a range, we add it here + if leaseRange != nil { + if leaseRange.StartIP != nil { + hostRange.RangeStart = leaseRange.StartIP.String() + } + if leaseRange.EndIP != nil { + hostRange.RangeEnd = leaseRange.EndIP.String() + } + } + + if gw != nil { + hostRange.Gateway = gw.String() + } + return hostRange +} + +// newIPAMRoute creates a new IPAM route configuration +// nolint:interfacer +func newIPAMRoute(r *net.IPNet) ipamRoute { + return ipamRoute{Dest: r.String()} +} + +// newIPAMDefaultRoute creates a new IPAMDefault route of +// 0.0.0.0/0 for IPv4 or ::/0 for IPv6 +func newIPAMDefaultRoute(isIPv6 bool) (ipamRoute, error) { + route := defaultIPv4Route + if isIPv6 { + route = defaultIPv6Route + } + _, n, err := net.ParseCIDR(route) + if err != nil { + return ipamRoute{}, err + } + return newIPAMRoute(n), nil +} + +// newPortMapPlugin creates a predefined, default portmapping +// configuration +func newPortMapPlugin() portMapConfig { + caps := make(map[string]bool) + caps["portMappings"] = true + p := portMapConfig{ + PluginType: "portmap", + Capabilities: caps, + } + return p +} + +// newFirewallPlugin creates a generic firewall plugin +func newFirewallPlugin() firewallConfig { + return firewallConfig{ + PluginType: "firewall", + } +} + +// newTuningPlugin creates 
a generic tuning section +func newTuningPlugin() tuningConfig { + return tuningConfig{ + PluginType: "tuning", + } +} + +// newDNSNamePlugin creates the dnsname config with a given +// domainname +func newDNSNamePlugin(domainName string) dnsNameConfig { + caps := make(map[string]bool, 1) + caps["aliases"] = true + return dnsNameConfig{ + PluginType: "dnsname", + DomainName: domainName, + Capabilities: caps, + } +} + +// hasDNSNamePlugin looks to see if the dnsname cni plugin is present +func hasDNSNamePlugin(paths []string) bool { + for _, p := range paths { + if _, err := os.Stat(filepath.Join(p, "dnsname")); err == nil { + return true + } + } + return false +} + +// newVLANPlugin creates a macvlanconfig with a given device name +func newVLANPlugin(pluginType, device, mode string, mtu int, ipam *ipamConfig) VLANConfig { + m := VLANConfig{ + PluginType: pluginType, + IPAM: *ipam, + } + if mtu > 0 { + m.MTU = mtu + } + if len(mode) > 0 { + m.Mode = mode + } + // CNI is supposed to use the default route if a + // parent device is not provided + if len(device) > 0 { + m.Master = device + } + caps := make(map[string]bool) + caps["ips"] = true + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipam.PluginType == types.HostLocalIPAMDriver { + m.Capabilities = caps + } + return m +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go new file mode 100644 index 00000000000..b1f89400cf3 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -0,0 +1,208 @@ +// +build linux + +package cni + +import ( + "net" + "os" + + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// NetworkCreate will take a partial filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +// nolint:gocritic +func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.libpodNet.Name] = network + return *network.libpodNet, nil +} + +// networkCreate will fill out the given network struct and return the new network entry. +// If defaultNet is true it will not validate against used subnets and it will not write the cni config to disk. +func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. 
+ // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host. + var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks) + if err != nil { + return nil, err + } + case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: + err = createIPMACVLAN(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks) + if err != nil { + return nil, err + } + + // generate the network ID + newNetwork.ID = getNetworkIDFromName(newNetwork.Name) + + // FIXME: Should this be a hard error? + if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { + logrus.Warnf("dnsname and internal networks are incompatible. dnsname plugin not configured for network %s", newNetwork.Name) + newNetwork.DNSEnabled = false + } + + cniConf, path, err := n.createCNIConfigListFromNetwork(newNetwork, !defaultNet) + if err != nil { + return nil, err + } + return &network{cniNet: cniConf, libpodNet: newNetwork, filename: path}, nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. +func (n *cniNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. + if network.libpodNet.Name == n.defaultNetwork { + return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + } + + // Remove the bridge network interface on the host. + if network.libpodNet.Driver == types.BridgeNetworkDriver { + link, err := netlink.LinkByName(network.libpodNet.NetworkInterface) + if err == nil { + err = netlink.LinkDel(link) + // only log the error, it is not fatal + if err != nil { + logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err) + } + } + } + + file := network.filename + delete(n.networks, network.libpodNet.Name) + + // make sure to not error for ErrNotExist + if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + return nil +} + +// NetworkList will return all known Networks. Optionally you can +// supply a list of filter functions. Only if a network matches all +// functions it is returned. 
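+// For example (illustrative), to list only bridge networks:
+//
+//	nets, err := n.NetworkList(func(net types.Network) bool {
+//		return net.Driver == types.BridgeNetworkDriver
+//	})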
+func (n *cniNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	err := n.loadNetworks()
+	if err != nil {
+		return nil, err
+	}
+
+	networks := make([]types.Network, 0, len(n.networks))
+outer:
+	for _, net := range n.networks {
+		for _, filter := range filters {
+			// All filters have to match; if one does not match we can skip to the next network.
+			if !filter(*net.libpodNet) {
+				continue outer
+			}
+		}
+		networks = append(networks, *net.libpodNet)
+	}
+	return networks, nil
+}
+
+// NetworkInspect will return the Network with the given name or ID.
+func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	err := n.loadNetworks()
+	if err != nil {
+		return types.Network{}, err
+	}
+
+	network, err := n.getNetwork(nameOrID)
+	if err != nil {
+		return types.Network{}, err
+	}
+	return *network.libpodNet, nil
+}
+
+func createIPMACVLAN(network *types.Network) error {
+	if network.Internal {
+		return errors.New("internal is not supported with macvlan")
+	}
+	if network.NetworkInterface != "" {
+		interfaceNames, err := internalutil.GetLiveNetworkNames()
+		if err != nil {
+			return err
+		}
+		if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) {
+			return errors.Errorf("parent interface %s does not exist", network.NetworkInterface)
+		}
+	}
+	if len(network.Subnets) == 0 {
+		network.IPAMOptions["driver"] = types.DHCPIPAMDriver
+	} else {
+		network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go
new file mode 100644
index 00000000000..95822723525
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/network.go
@@ -0,0 +1,275 @@
+// +build linux
+
+package cni
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/containernetworking/cni/libcni"
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/storage/pkg/lockfile"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type cniNetwork struct {
+	// cniConfigDir is the directory where the cni config files are stored.
+	cniConfigDir string
+	// cniPluginDirs is a list of directories where cni should look for the plugins.
+	cniPluginDirs []string
+
+	cniConf *libcni.CNIConfig
+
+	// defaultNetwork is the name for the default network.
+	defaultNetwork string
+	// defaultSubnet is the default subnet for the default network.
+	defaultSubnet types.IPNet
+
+	// isMachine describes whether podman runs in a podman machine environment.
+	isMachine bool
+
+	// lock is an internal lock for critical operations
+	lock lockfile.Locker
+
+	// modTime is the timestamp when the config dir was modified
+	modTime time.Time
+
+	// networks is a map with loaded networks, the key is the network name
+	networks map[string]*network
+}
+
+type network struct {
+	// filename is the full path to the cni config file on disk
+	filename  string
+	libpodNet *types.Network
+	cniNet    *libcni.NetworkConfigList
+}
+
+type InitConfig struct {
+	// CNIConfigDir is the directory where the cni config files are stored.
+	CNIConfigDir string
+	// CNIPluginDirs is a list of directories where cni should look for the plugins.
+	CNIPluginDirs []string
+
+	// DefaultNetwork is the name for the default network.
+	DefaultNetwork string
+	// DefaultSubnet is the default subnet for the default network.
+	DefaultSubnet string
+
+	// IsMachine describes whether podman runs in a podman machine environment.
+	IsMachine bool
+}
+
+// NewCNINetworkInterface creates the ContainerNetwork interface for the CNI backend.
+// Note: The networks are not loaded from disk until a method is called.
+func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
+	// TODO: consider using a shared memory lock
+	lock, err := lockfile.GetLockfile(filepath.Join(conf.CNIConfigDir, "cni.lock"))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultNetworkName := conf.DefaultNetwork
+	if defaultNetworkName == "" {
+		defaultNetworkName = types.DefaultNetworkName
+	}
+
+	defaultSubnet := conf.DefaultSubnet
+	if defaultSubnet == "" {
+		defaultSubnet = types.DefaultSubnet
+	}
+	defaultNet, err := types.ParseCIDR(defaultSubnet)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse default subnet")
+	}
+
+	cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{})
+	n := &cniNetwork{
+		cniConfigDir:   conf.CNIConfigDir,
+		cniPluginDirs:  conf.CNIPluginDirs,
+		cniConf:        cni,
+		defaultNetwork: defaultNetworkName,
+		defaultSubnet:  defaultNet,
+		isMachine:      conf.IsMachine,
+		lock:           lock,
+	}
+
+	return n, nil
+}
+
+// Drivers will return the list of supported network drivers
+// for this interface.
+func (n *cniNetwork) Drivers() []string {
+	return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver, types.IPVLANNetworkDriver}
+}
+
+// DefaultNetworkName will return the default cni network name.
+func (n *cniNetwork) DefaultNetworkName() string {
+	return n.defaultNetwork
+}
+
+func (n *cniNetwork) loadNetworks() error {
+	// check the mod time of the config dir
+	f, err := os.Stat(n.cniConfigDir)
+	if err != nil {
+		return err
+	}
+	modTime := f.ModTime()
+
+	// skip loading networks if they are already loaded and
+	// if the config dir was not modified since the last call
+	if n.networks != nil && modTime.Equal(n.modTime) {
+		return nil
+	}
+	// make sure to remove all networks before we reload them
+	n.networks = nil
+	n.modTime = modTime
+
+	// FIXME: do we have to support other file types as well, e.g. .conf?
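+	// Note: only .conflist files are loaded below; single-plugin .conf files are currently skipped.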
+	files, err := libcni.ConfFiles(n.cniConfigDir, []string{".conflist"})
+	if err != nil {
+		return err
+	}
+	networks := make(map[string]*network, len(files))
+	for _, file := range files {
+		conf, err := libcni.ConfListFromFile(file)
+		if err != nil {
+			// do not log ENOENT errors
+			if !errors.Is(err, os.ErrNotExist) {
+				logrus.Warnf("Error loading CNI config file %s: %v", file, err)
+			}
+			continue
+		}
+
+		if !types.NameRegex.MatchString(conf.Name) {
+			logrus.Warnf("CNI config list %s has invalid name, skipping: %v", file, types.RegexError)
+			continue
+		}
+
+		// podman < v4.0 used the podman-machine cni plugin for podman machine port forwarding.
+		// Since this is now built into podman we no longer use the plugin;
+		// old configs may still contain it so we just remove it here.
+		if n.isMachine {
+			conf = removeMachinePlugin(conf)
+		}
+
+		if _, err := n.cniConf.ValidateNetworkList(context.Background(), conf); err != nil {
+			logrus.Warnf("Error validating CNI config file %s: %v", file, err)
+			continue
+		}
+
+		if val, ok := networks[conf.Name]; ok {
+			logrus.Warnf("CNI config list %s has the same network name as %s, skipping", file, val.filename)
+			continue
+		}
+
+		net, err := createNetworkFromCNIConfigList(conf, file)
+		if err != nil {
+			logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err)
+			continue
+		}
+		logrus.Debugf("Successfully loaded network %s: %v", net.Name, net)
+		networkInfo := network{
+			filename:  file,
+			cniNet:    conf,
+			libpodNet: net,
+		}
+		networks[net.Name] = &networkInfo
+	}
+
+	// create the default network in memory if it did not exist on disk
+	if networks[n.defaultNetwork] == nil {
+		networkInfo, err := n.createDefaultNetwork()
+		if err != nil {
+			return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
+		}
+		networks[n.defaultNetwork] = networkInfo
+	}
+
+	logrus.Debugf("Successfully loaded %d networks", len(networks))
+	n.networks = networks
+	return nil
+}
+
+func (n *cniNetwork) createDefaultNetwork() (*network, error) {
+	net := types.Network{
+		Name:             n.defaultNetwork,
+		NetworkInterface: "cni-podman0",
+		Driver:           types.BridgeNetworkDriver,
+		Subnets: []types.Subnet{
+			{Subnet: n.defaultSubnet},
+		},
+	}
+	return n.networkCreate(&net, true)
+}
+
+// getNetwork will look up a network by name or ID. It returns an
+// error when no network was found or when more than one network
+// with the given (partial) ID exists.
+// getNetwork reads from the networks map, therefore the caller
+// must ensure that n.lock is held before using it.
+func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) {
+	// fast path: check the map key, this will only work for names
+	if val, ok := n.networks[nameOrID]; ok {
+		return val, nil
+	}
+	// If there was no match we might have gotten a full or partial ID.
+	var net *network
+	for _, val := range n.networks {
+		// This should not happen because we already looked up the map by name, but check anyway.
+		if val.libpodNet.Name == nameOrID {
+			return val, nil
+		}
+
+		if strings.HasPrefix(val.libpodNet.ID, nameOrID) {
+			if net != nil {
+				return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
+			}
+			net = val
+		}
+	}
+	if net != nil {
+		return net, nil
+	}
+	return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
+}
+
+// getNetworkIDFromName creates a network ID from the name. It is just the
+// sha256 hash, so it is not collision-safe, but it is safe enough for our use case.
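+// For example (illustrative), getNetworkIDFromName("podman") always yields the
+// same 64 character hex string, so IDs are stable across hosts.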
+func getNetworkIDFromName(name string) string {
+	hash := sha256.Sum256([]byte(name))
+	return hex.EncodeToString(hash[:])
+}
+
+// Implement the NetUtil interface for easy code sharing with other network interfaces.
+
+// ForEach calls the given function for each network.
+func (n *cniNetwork) ForEach(run func(types.Network)) {
+	for _, val := range n.networks {
+		run(*val.libpodNet)
+	}
+}
+
+// Len returns the number of networks.
+func (n *cniNetwork) Len() int {
+	return len(n.networks)
+}
+
+// DefaultInterfaceName returns the default cni bridge name; it must be suffixed with a number.
+func (n *cniNetwork) DefaultInterfaceName() string {
+	return cniDeviceName
+}
+
+func (n *cniNetwork) Network(nameOrID string) (*types.Network, error) {
+	network, err := n.getNetwork(nameOrID)
+	if err != nil {
+		return nil, err
+	}
+	return network.libpodNet, err
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go
new file mode 100644
index 00000000000..af05d9d9d5c
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/run.go
@@ -0,0 +1,274 @@
+// +build linux
+
+package cni
+
+import (
+	"context"
+	"net"
+	"os"
+	"strings"
+
+	"github.com/containernetworking/cni/libcni"
+	cnitypes "github.com/containernetworking/cni/pkg/types"
+	types040 "github.com/containernetworking/cni/pkg/types/040"
+	"github.com/containernetworking/plugins/pkg/ns"
+	"github.com/containers/common/libnetwork/internal/util"
+	"github.com/containers/common/libnetwork/types"
+	"github.com/hashicorp/go-multierror"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vishvananda/netlink"
+)
+
+// Setup will set up the container network namespace. It returns
+// a map of StatusBlocks, keyed by network name.
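+// Typical use (illustrative sketch; assumes types.SetupOptions embeds
+// types.NetworkOptions as in containers/common):
+//
+//	status, err := n.Setup("/run/netns/cni-abc123", types.SetupOptions{
+//		NetworkOptions: types.NetworkOptions{
+//			ContainerID: ctrID,
+//			Networks: map[string]types.PerNetworkOptions{
+//				"podman": {InterfaceName: "eth0"},
+//			},
+//		},
+//	})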
+func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	err := n.loadNetworks()
+	if err != nil {
+		return nil, err
+	}
+
+	err = util.ValidateSetupOptions(n, namespacePath, options)
+	if err != nil {
+		return nil, err
+	}
+
+	// set the loopback adapter up in the container netns
+	err = ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error {
+		link, err := netlink.LinkByName("lo")
+		if err == nil {
+			err = netlink.LinkSetUp(link)
+		}
+		return err
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to set the loopback adapter up")
+	}
+
+	var retErr error
+	teardownOpts := options
+	teardownOpts.Networks = map[string]types.PerNetworkOptions{}
+	// make sure to teardown the already connected networks on error
+	defer func() {
+		if retErr != nil {
+			if len(teardownOpts.Networks) > 0 {
+				err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts))
+				if err != nil {
+					logrus.Warn(err)
+				}
+			}
+		}
+	}()
+
+	ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings)
+	if err != nil {
+		return nil, err
+	}
+
+	results := make(map[string]types.StatusBlock, len(options.Networks))
+	for name, netOpts := range options.Networks {
+		netOpts := netOpts
+		network := n.networks[name]
+		rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts)
+
+		// If we have more than one static ip we need to pass the ips via the runtime config.
+		// Make sure to add the ips capability to the first plugin, otherwise it does not get the ips.
+		if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] {
+			caps := make(map[string]interface{})
+			caps["capabilities"] = map[string]bool{"ips": true}
+			network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps)
+			if retErr != nil {
+				return nil, retErr
+			}
+		}
+
+		var res cnitypes.Result
+		res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt)
+		// Add this network to the teardown opts since it is now connected.
+		// Do this even when an error was returned, since we want to call teardown on it regardless.
+ teardownOpts.Networks[name] = netOpts + if retErr != nil { + return nil, retErr + } + + logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res) + var status types.StatusBlock + status, retErr = CNIResultToStatus(res) + if retErr != nil { + return nil, retErr + } + results[name] = status + } + return results, nil +} + +// CNIResultToStatus convert the cni result to status block +// nolint:golint +func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) { + result := types.StatusBlock{} + cniResult, err := types040.GetResult(res) + if err != nil { + return result, err + } + nameservers := make([]net.IP, 0, len(cniResult.DNS.Nameservers)) + for _, nameserver := range cniResult.DNS.Nameservers { + ip := net.ParseIP(nameserver) + if ip == nil { + return result, errors.Errorf("failed to parse cni nameserver ip %s", nameserver) + } + nameservers = append(nameservers, ip) + } + result.DNSServerIPs = nameservers + result.DNSSearchDomains = cniResult.DNS.Search + + interfaces := make(map[string]types.NetInterface) + for _, ip := range cniResult.IPs { + if ip.Interface == nil { + // we do no expect ips without an interface + continue + } + if len(cniResult.Interfaces) <= *ip.Interface { + return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface) + } + cniInt := cniResult.Interfaces[*ip.Interface] + netInt, ok := interfaces[cniInt.Name] + if ok { + netInt.Subnets = append(netInt.Subnets, types.NetAddress{ + IPNet: types.IPNet{IPNet: ip.Address}, + Gateway: ip.Gateway, + }) + interfaces[cniInt.Name] = netInt + } else { + mac, err := net.ParseMAC(cniInt.Mac) + if err != nil { + return result, err + } + interfaces[cniInt.Name] = types.NetInterface{ + MacAddress: types.HardwareAddr(mac), + Subnets: []types.NetAddress{{ + IPNet: types.IPNet{IPNet: ip.Address}, + Gateway: ip.Gateway, + }}, + } + } + } + result.Interfaces = interfaces + return result, nil +} + +func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPortMapEntry, opts *types.PerNetworkOptions) *libcni.RuntimeConf { + rt := &libcni.RuntimeConf{ + ContainerID: conID, + NetNS: netns, + IfName: opts.InterfaceName, + Args: [][2]string{ + {"IgnoreUnknown", "1"}, + // Do not set the K8S env vars, see https://github.com/containers/podman/issues/12083. + // Only K8S_POD_NAME is used by dnsname to get the container name. + {"K8S_POD_NAME", conName}, + }, + CapabilityArgs: map[string]interface{}{}, + } + + // Propagate environment CNI_ARGS + for _, kvpairs := range strings.Split(os.Getenv("CNI_ARGS"), ";") { + if keyval := strings.SplitN(kvpairs, "=", 2); len(keyval) == 2 { + rt.Args = append(rt.Args, [2]string{keyval[0], keyval[1]}) + } + } + + // Add mac address to cni args + if len(opts.StaticMAC) > 0 { + rt.Args = append(rt.Args, [2]string{"MAC", opts.StaticMAC.String()}) + } + + if len(opts.StaticIPs) == 1 { + // Add a single IP to the args field. CNI plugins < 1.0.0 + // do not support multiple ips via capability args. + rt.Args = append(rt.Args, [2]string{"IP", opts.StaticIPs[0].String()}) + } else if len(opts.StaticIPs) > 1 { + // Set the static ips in the capability args + // to support more than one static ip per network. + rt.CapabilityArgs["ips"] = opts.StaticIPs + } + + // Set network aliases for the dnsname plugin. 
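+	// They are passed below via the "aliases" capability keyed by network name,
+	// e.g. {"mynet": ["db", "dbalias"]} (illustrative values).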
+ if len(opts.Aliases) > 0 { + rt.CapabilityArgs["aliases"] = map[string][]string{ + networkName: opts.Aliases, + } + } + + // Set PortMappings in Capabilities + if len(ports) > 0 { + rt.CapabilityArgs["portMappings"] = ports + } + + return rt +} + +// Teardown will teardown the container network namespace. +func (n *cniNetwork) Teardown(namespacePath string, options types.TeardownOptions) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + return n.teardown(namespacePath, options) +} + +func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOptions) error { + // Note: An empty namespacePath is allowed because some plugins + // still need teardown, for example ipam should remove used ip allocations. + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return err + } + + var multiErr *multierror.Error + for name, netOpts := range options.Networks { + netOpts := netOpts + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) + if err == nil { + rt = newRt + } else { + logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) + network := n.networks[name] + if network == nil { + multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name)) + continue + } + cniConfList = network.cniNet + } + + err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + } + return multiErr.ErrorOrNil() +} + +func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.RuntimeConf) (*libcni.NetworkConfigList, *libcni.RuntimeConf, error) { + cniConfList := &libcni.NetworkConfigList{ + Name: name, + } + confBytes, rt, err := cniConf.GetNetworkListCachedConfig(cniConfList, rt) + if err != nil { + return nil, nil, err + } else if confBytes == nil { + return nil, nil, errors.Errorf("network %s not found in CNI cache", name) + } + + cniConfList, err = libcni.ConfListFromBytes(confBytes) + if err != nil { + return nil, nil, err + } + return cniConfList, rt, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go new file mode 100644 index 00000000000..27ad0a4fbbc --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go @@ -0,0 +1,68 @@ +package util + +import ( + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" +) + +func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet) error { + if network.NetworkInterface != "" { + bridges := GetBridgeInterfaceNames(n) + if pkgutil.StringInSlice(network.NetworkInterface, bridges) { + return errors.Errorf("bridge name %s already in use", network.NetworkInterface) + } + if !types.NameRegex.MatchString(network.NetworkInterface) { + return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface) + } + } else { + var err error + network.NetworkInterface, err = GetFreeDeviceName(n) + if err != nil { + return err + } + } + + if network.IPAMOptions["driver"] != types.DHCPIPAMDriver { + if len(network.Subnets) == 0 { + 
freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks)
+			if err != nil {
+				return err
+			}
+			network.Subnets = append(network.Subnets, *freeSubnet)
+		}
+		// ipv6 enabled means dual stack, check if we already have
+		// an ipv4 or ipv6 subnet and add one if not.
+		if network.IPv6Enabled {
+			ipv4 := false
+			ipv6 := false
+			for _, subnet := range network.Subnets {
+				if util.IsIPv6(subnet.Subnet.IP) {
+					ipv6 = true
+				}
+				if util.IsIPv4(subnet.Subnet.IP) {
+					ipv4 = true
+				}
+			}
+			if !ipv4 {
+				freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks)
+				if err != nil {
+					return err
+				}
+				network.Subnets = append(network.Subnets, *freeSubnet)
+			}
+			if !ipv6 {
+				freeSubnet, err := GetFreeIPv6NetworkSubnet(usedNetworks)
+				if err != nil {
+					return err
+				}
+				network.Subnets = append(network.Subnets, *freeSubnet)
+			}
+		}
+		network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/create.go b/vendor/github.com/containers/common/libnetwork/internal/util/create.go
new file mode 100644
index 00000000000..ccb0f001a51
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/create.go
@@ -0,0 +1,41 @@
+package util
+
+import (
+	"github.com/containers/common/libnetwork/types"
+	"github.com/pkg/errors"
+)
+
+func CommonNetworkCreate(n NetUtil, network *types.Network) error {
+	if network.Labels == nil {
+		network.Labels = map[string]string{}
+	}
+	if network.Options == nil {
+		network.Options = map[string]string{}
+	}
+	if network.IPAMOptions == nil {
+		network.IPAMOptions = map[string]string{}
+	}
+
+	var name string
+	var err error
+	// validate the name when given
+	if network.Name != "" {
+		if !types.NameRegex.MatchString(network.Name) {
+			return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name)
+		}
+		if _, err := n.Network(network.Name); err == nil {
+			return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name)
+		}
+	} else {
+		name, err = GetFreeDeviceName(n)
+		if err != nil {
+			return err
+		}
+		network.Name = name
+		// also use the name as interface name when we create a bridge network
+		if network.Driver == types.BridgeNetworkDriver && network.NetworkInterface == "" {
+			network.NetworkInterface = name
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go
new file mode 100644
index 00000000000..650fcb193c9
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go
@@ -0,0 +1,19 @@
+package util
+
+import "github.com/containers/common/libnetwork/types"
+
+// This is a helper package to allow code sharing between the different
+// network interfaces.
+
+// NetUtil is a helper interface which all network interfaces should implement to allow easy code sharing.
+type NetUtil interface {
+	// ForEach executes the given function for each network.
+	ForEach(func(types.Network))
+	// Len returns the number of networks.
+	Len() int
+	// DefaultInterfaceName returns the default interface name; it will be suffixed by a number.
+	DefaultInterfaceName() string
+	// Network returns the network with the given name or ID.
+ // It returns an error if the network is not found + Network(nameOrID string) (*types.Network, error) +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go new file mode 100644 index 00000000000..20819f75663 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go @@ -0,0 +1,34 @@ +package util + +import "net" + +// getLiveNetworkSubnets returns a slice of subnets representing what the system +// has defined as network interfaces +func getLiveNetworkSubnets() ([]*net.IPNet, error) { + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, err + } + nets := make([]*net.IPNet, 0, len(addrs)) + for _, address := range addrs { + _, n, err := net.ParseCIDR(address.String()) + if err != nil { + return nil, err + } + nets = append(nets, n) + } + return nets, nil +} + +// GetLiveNetworkNames returns a list of network interface names on the system +func GetLiveNetworkNames() ([]string, error) { + liveInterfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + interfaceNames := make([]string, 0, len(liveInterfaces)) + for _, i := range liveInterfaces { + interfaceNames = append(interfaceNames, i.Name) + } + return interfaceNames, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go new file mode 100644 index 00000000000..8f00a2a5560 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go @@ -0,0 +1,70 @@ +package util + +import ( + "crypto/rand" + "net" + + "github.com/pkg/errors" +) + +func incByte(subnet *net.IPNet, idx int, shift uint) error { + if idx < 0 { + return errors.New("no more subnets left") + } + if subnet.IP[idx] == 255 { + subnet.IP[idx] = 0 + return incByte(subnet, idx-1, 0) + } + subnet.IP[idx] += 1 << shift + return nil +} + +// NextSubnet returns subnet incremented by 1 +func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) { + newSubnet := &net.IPNet{ + IP: subnet.IP, + Mask: subnet.Mask, + } + ones, bits := newSubnet.Mask.Size() + if ones == 0 { + return nil, errors.Errorf("%s has only one subnet", subnet.String()) + } + zeroes := uint(bits - ones) + shift := zeroes % 8 + idx := ones/8 - 1 + if idx < 0 { + idx = 0 + } + if err := incByte(newSubnet, idx, shift); err != nil { + return nil, err + } + return newSubnet, nil +} + +func NetworkIntersectsWithNetworks(n *net.IPNet, networklist []*net.IPNet) bool { + for _, nw := range networklist { + if networkIntersect(n, nw) { + return true + } + } + return false +} + +func networkIntersect(n1, n2 *net.IPNet) bool { + return n2.Contains(n1.IP) || n1.Contains(n2.IP) +} + +// getRandomIPv6Subnet returns a random internal ipv6 subnet as described in RFC3879. +func getRandomIPv6Subnet() (net.IPNet, error) { + ip := make(net.IP, 8, net.IPv6len) + // read 8 random bytes + _, err := rand.Read(ip) + if err != nil { + return net.IPNet{}, err + } + // first byte must be FD as per RFC3879 + ip[0] = 0xfd + // add 8 zero bytes + ip = append(ip, make([]byte, 8)...) 
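+	// the result is a /64 inside fd00::/8, e.g. fd9e:21a7:a92c:2323::/64 (illustrative)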
+	return net.IPNet{IP: ip, Mask: net.CIDRMask(64, 128)}, nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
new file mode 100644
index 00000000000..1f68df0bb0d
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
@@ -0,0 +1,37 @@
+package util
+
+import (
+	"strconv"
+
+	"github.com/pkg/errors"
+)
+
+// ParseMTU parses the mtu option
+func ParseMTU(mtu string) (int, error) {
+	if mtu == "" {
+		return 0, nil // default
+	}
+	m, err := strconv.Atoi(mtu)
+	if err != nil {
+		return 0, err
+	}
+	if m < 0 {
+		return 0, errors.Errorf("mtu %d is less than zero", m)
+	}
+	return m, nil
+}
+
+// ParseVlan parses the vlan option
+func ParseVlan(vlan string) (int, error) {
+	if vlan == "" {
+		return 0, nil // default
+	}
+	v, err := strconv.Atoi(vlan)
+	if err != nil {
+		return 0, err
+	}
+	if v < 0 || v > 4094 {
+		return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v)
+	}
+	return v, nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go
new file mode 100644
index 00000000000..8138d9fbc42
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go
@@ -0,0 +1,123 @@
+package util
+
+import (
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/util"
+	"github.com/sirupsen/logrus"
+)
+
+// GetBridgeInterfaceNames returns all bridge interface names
+// already used by network configs
+func GetBridgeInterfaceNames(n NetUtil) []string {
+	names := make([]string, 0, n.Len())
+	n.ForEach(func(net types.Network) {
+		if net.Driver == types.BridgeNetworkDriver {
+			names = append(names, net.NetworkInterface)
+		}
+	})
+	return names
+}
+
+// GetUsedNetworkNames returns all network names already used
+// by network configs
+func GetUsedNetworkNames(n NetUtil) []string {
+	names := make([]string, 0, n.Len())
+	n.ForEach(func(net types.Network) {
+		// collect the network name (not the interface name) for every driver
+		names = append(names, net.Name)
+	})
+	return names
+}
+
+// GetFreeDeviceName returns a free device name which can
+// be used for new configs as name and bridge interface name.
+// The base name is suffixed by a number
+func GetFreeDeviceName(n NetUtil) (string, error) {
+	bridgeNames := GetBridgeInterfaceNames(n)
+	netNames := GetUsedNetworkNames(n)
+	liveInterfaces, err := GetLiveNetworkNames()
+	if err != nil {
+		return "", err
+	}
+	names := make([]string, 0, len(bridgeNames)+len(netNames)+len(liveInterfaces))
+	names = append(names, bridgeNames...)
+	names = append(names, netNames...)
+	names = append(names, liveInterfaces...)
+	// FIXME: Is a limit fine?
+	// Start at 1; 0 is reserved for the default network.
+	for i := 1; i < 1000000; i++ {
+		deviceName := fmt.Sprintf("%s%d", n.DefaultInterfaceName(), i)
+		if !util.StringInSlice(deviceName, names) {
+			logrus.Debugf("found free device name %s", deviceName)
+			return deviceName, nil
+		}
+	}
+	return "", errors.New("could not find a free device name, too many iterations")
+}
+
+// GetUsedSubnets returns a list of all subnets already used by network
+// configs and interfaces on the host.
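+// The result feeds subnet allocation and validation (see CreateBridge and
+// ValidateSubnets) so that new networks do not collide with existing ones.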
+// GetUsedSubnets returns a list of all used subnets by network
+// configs and interfaces on the host.
+func GetUsedSubnets(n NetUtil) ([]*net.IPNet, error) {
+ // first, load all used subnets from network configs
+ subnets := make([]*net.IPNet, 0, n.Len())
+ n.ForEach(func(n types.Network) {
+ for i := range n.Subnets {
+ subnets = append(subnets, &n.Subnets[i].Subnet.IPNet)
+ }
+ })
+ // second, load networks from the current system
+ liveSubnets, err := getLiveNetworkSubnets()
+ if err != nil {
+ return nil, err
+ }
+ return append(subnets, liveSubnets...), nil
+}
+
+// GetFreeIPv4NetworkSubnet returns an unused ipv4 subnet
+func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) {
+ // the default podman network is 10.88.0.0/16
+ // start looking for free /24 networks
+ network := &net.IPNet{
+ IP: net.IP{10, 89, 0, 0},
+ Mask: net.IPMask{255, 255, 255, 0},
+ }
+
+ // TODO: make sure to not use public subnets
+ for {
+ if intersectsConfig := NetworkIntersectsWithNetworks(network, usedNetworks); !intersectsConfig {
+ logrus.Debugf("found free ipv4 network subnet %s", network.String())
+ return &types.Subnet{
+ Subnet: types.IPNet{IPNet: *network},
+ }, nil
+ }
+ var err error
+ network, err = NextSubnet(network)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// GetFreeIPv6NetworkSubnet returns an unused ipv6 subnet
+func GetFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) {
+ // FIXME: Is 10000 fine as limit? We should prevent an endless loop.
+ for i := 0; i < 10000; i++ {
+ // RFC4193: Choose the ipv6 subnet at random and NOT sequentially.
+ network, err := getRandomIPv6Subnet()
+ if err != nil {
+ return nil, err
+ }
+ if intersectsConfig := NetworkIntersectsWithNetworks(&network, usedNetworks); !intersectsConfig {
+ logrus.Debugf("found free ipv6 network subnet %s", network.String())
+ return &types.Subnet{
+ Subnet: types.IPNet{IPNet: network},
+ }, nil
+ }
+ }
+ return nil, errors.New("failed to get random ipv6 subnet")
+}
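Editor's note: for reference, a compact sketch of the sequential /24 search that GetFreeIPv4NetworkSubnet performs. The simplified `nextSlash24` stands in for NextSubnet and, as an assumption for brevity, ignores carry past the third octet:

```go
package main

import (
	"fmt"
	"net"
)

// intersect matches networkIntersect above: two subnets overlap when
// either contains the other's base address.
func intersect(a, b *net.IPNet) bool {
	return a.Contains(b.IP) || b.Contains(a.IP)
}

// nextSlash24 bumps the third octet; a simplified stand-in for NextSubnet.
func nextSlash24(n *net.IPNet) *net.IPNet {
	ip := n.IP.To4()
	return &net.IPNet{IP: net.IP{ip[0], ip[1], ip[2] + 1, 0}, Mask: n.Mask}
}

func main() {
	used := []*net.IPNet{
		{IP: net.IP{10, 89, 0, 0}, Mask: net.CIDRMask(24, 32)},
		{IP: net.IP{10, 89, 1, 0}, Mask: net.CIDRMask(24, 32)},
	}
	candidate := &net.IPNet{IP: net.IP{10, 89, 0, 0}, Mask: net.CIDRMask(24, 32)}
outer:
	for {
		for _, u := range used {
			if intersect(candidate, u) {
				candidate = nextSlash24(candidate)
				continue outer
			}
		}
		break
	}
	fmt.Println("first free subnet:", candidate) // 10.89.2.0/24
}
```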
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
new file mode 100644
index 00000000000..ac3934f8df3
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
@@ -0,0 +1,124 @@
+package util
+
+import (
+ "net"
+
+ "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/libnetwork/util"
+ "github.com/pkg/errors"
+)
+
+// ValidateSubnet will validate a given Subnet. It checks if the
+// given gateway and lease range are part of this subnet. If the
+// gateway is empty and addGateway is true, the first available
+// ip in the subnet is assigned as gateway.
+func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) error {
+ if s == nil {
+ return errors.New("subnet is nil")
+ }
+ if s.Subnet.IP == nil {
+ return errors.New("subnet ip is nil")
+ }
+
+ // Reparse to ensure subnet is valid.
+ // Do not use types.ParseCIDR() because we want the ip to be
+ // the network address and not a random ip in the subnet.
+ _, n, err := net.ParseCIDR(s.Subnet.String())
+ if err != nil {
+ return errors.Wrap(err, "subnet invalid")
+ }
+
+ // check that the new subnet does not conflict with existing ones
+ if NetworkIntersectsWithNetworks(n, usedNetworks) {
+ return errors.Errorf("subnet %s is already used on the host or by another config", n.String())
+ }
+
+ s.Subnet = types.IPNet{IPNet: *n}
+ if s.Gateway != nil {
+ if !s.Subnet.Contains(s.Gateway) {
+ return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet)
+ }
+ util.NormalizeIP(&s.Gateway)
+ } else if addGateway {
+ ip, err := util.FirstIPInSubnet(n)
+ if err != nil {
+ return err
+ }
+ s.Gateway = ip
+ }
+
+ if s.LeaseRange != nil {
+ if s.LeaseRange.StartIP != nil {
+ if !s.Subnet.Contains(s.LeaseRange.StartIP) {
+ return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet)
+ }
+ util.NormalizeIP(&s.LeaseRange.StartIP)
+ }
+ if s.LeaseRange.EndIP != nil {
+ if !s.Subnet.Contains(s.LeaseRange.EndIP) {
+ return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet)
+ }
+ util.NormalizeIP(&s.LeaseRange.EndIP)
+ }
+ }
+ return nil
+}
+
+// ValidateSubnets will validate the subnets for this network.
+// It also sets the gateway if the gateway is empty and addGateway is set to true,
+// and it sets IPv6Enabled to true if at least one subnet is ipv6.
+func ValidateSubnets(network *types.Network, addGateway bool, usedNetworks []*net.IPNet) error {
+ for i := range network.Subnets {
+ err := ValidateSubnet(&network.Subnets[i], addGateway, usedNetworks)
+ if err != nil {
+ return err
+ }
+ if util.IsIPv6(network.Subnets[i].Subnet.IP) {
+ network.IPv6Enabled = true
+ }
+ }
+ return nil
+}
+
+func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOptions) error {
+ if namespacePath == "" {
+ return errors.New("namespacePath is empty")
+ }
+ if options.ContainerID == "" {
+ return errors.New("ContainerID is empty")
+ }
+ if len(options.Networks) == 0 {
+ return errors.New("must specify at least one network")
+ }
+ for name, netOpts := range options.Networks {
+ netOpts := netOpts
+ network, err := n.Network(name)
+ if err != nil {
+ return err
+ }
+ err = validatePerNetworkOpts(network, &netOpts)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// validatePerNetworkOpts checks that all given static ips are in a subnet on this network
+func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error {
+ if netOpts.InterfaceName == "" {
+ return errors.Errorf("interface name on network %s is empty", network.Name)
+ }
+ if network.IPAMOptions["driver"] == types.HostLocalIPAMDriver {
+ outer:
+ for _, ip := range netOpts.StaticIPs {
+ for _, s := range network.Subnets {
+ if s.Subnet.Contains(ip) {
+ continue outer
+ }
+ }
+ return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
new file mode 100644
index 00000000000..16b4e5c53b7
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -0,0 +1,249 @@
+// +build linux
+
+package netavark
+
+import (
+ "encoding/json"
+ "net"
+ "os"
+ "path/filepath"
+ "time"
+
+ internalutil "github.com/containers/common/libnetwork/internal/util"
+ "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/util"
+
"github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" +) + +// NetworkCreate will take a partial filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +// nolint:gocritic +func (n *netavarkNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.Name] = network + return *network, nil +} + +func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*types.Network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + if !defaultNet { + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + // generate random network ID + var i int + for i = 0; i < 1000; i++ { + id := stringid.GenerateNonCryptoID() + if _, err := n.getNetwork(id); err != nil { + newNetwork.ID = id + break + } + } + if i == 1000 { + return nil, errors.New("failed to create random network ID") + } + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. + // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host. 
+ var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks) + if err != nil { + return nil, err + } + // validate the given options, we do not need them but just check to make sure they are valid + for key, value := range newNetwork.Options { + switch key { + case "mtu": + _, err = internalutil.ParseMTU(value) + if err != nil { + return nil, err + } + + case "vlan": + _, err = internalutil.ParseVlan(value) + if err != nil { + return nil, err + } + + default: + return nil, errors.Errorf("unsupported bridge network option %s", key) + } + } + case types.MacVLANNetworkDriver: + err = createMacvlan(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + // add gatway when not internal or dns enabled + addGateway := !newNetwork.Internal || newNetwork.DNSEnabled + err = internalutil.ValidateSubnets(newNetwork, addGateway, usedNetworks) + if err != nil { + return nil, err + } + + newNetwork.Created = time.Now() + + if !defaultNet { + confPath := filepath.Join(n.networkConfigDir, newNetwork.Name+".json") + f, err := os.Create(confPath) + if err != nil { + return nil, err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(newNetwork) + if err != nil { + return nil, err + } + } + + return newNetwork, nil +} + +func createMacvlan(network *types.Network) error { + if network.Internal { + return errors.New("internal is not supported with macvlan") + } + if network.NetworkInterface != "" { + interfaceNames, err := internalutil.GetLiveNetworkNames() + if err != nil { + return err + } + if !util.StringInSlice(network.NetworkInterface, interfaceNames) { + return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + } + } + if len(network.Subnets) == 0 { + return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not supported with netavark") + } + network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + + // validate the given options, we do not need them but just check to make sure they are valid + for key, value := range network.Options { + switch key { + case "mode": + if !util.StringInSlice(value, types.ValidMacVLANModes) { + return errors.Errorf("unknown macvlan mode %q", value) + } + case "mtu": + _, err := internalutil.ParseMTU(value) + if err != nil { + return err + } + default: + return errors.Errorf("unsupported macvlan network option %s", key) + } + } + return nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. +func (n *netavarkNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. 
+ if network.Name == n.defaultNetwork {
+ return errors.Errorf("default network %s cannot be removed", n.defaultNetwork)
+ }
+
+ file := filepath.Join(n.networkConfigDir, network.Name+".json")
+ // make sure to not error for ErrNotExist
+ if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ delete(n.networks, network.Name)
+ return nil
+}
+
+// NetworkList will return all known Networks. Optionally you can
+// supply a list of filter functions. Only if a network matches all
+// functions it is returned.
+func (n *netavarkNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ err := n.loadNetworks()
+ if err != nil {
+ return nil, err
+ }
+
+ networks := make([]types.Network, 0, len(n.networks))
+outer:
+ for _, net := range n.networks {
+ for _, filter := range filters {
+ // All filters have to match, if one does not match we can skip to the next network.
+ if !filter(*net) {
+ continue outer
+ }
+ }
+ networks = append(networks, *net)
+ }
+ return networks, nil
+}
+
+// NetworkInspect will return the Network with the given name or ID.
+func (n *netavarkNetwork) NetworkInspect(nameOrID string) (types.Network, error) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ err := n.loadNetworks()
+ if err != nil {
+ return types.Network{}, err
+ }
+
+ network, err := n.getNetwork(nameOrID)
+ if err != nil {
+ return types.Network{}, err
+ }
+ return *network, nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go
new file mode 100644
index 00000000000..9709315c6ca
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go
@@ -0,0 +1,5 @@
+// +build linux
+
+package netavark
+
+const defaultBridgeName = "podman"
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
new file mode 100644
index 00000000000..1812b908435
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
@@ -0,0 +1,161 @@
+// +build linux
+
+package netavark
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "os"
+ "os/exec"
+ "strconv"
+
+ "github.com/sirupsen/logrus"
+)
+
+type netavarkError struct {
+ exitCode int
+ // Set the json key to "error" so we can directly unmarshal into this struct
+ Msg string `json:"error"`
+ err error
+}
+
+func (e *netavarkError) Error() string {
+ ec := ""
+ // only add the exit code to the error message if we have at least info log level
+ // the normal user does not need to care about the number
+ if e.exitCode > 0 && logrus.IsLevelEnabled(logrus.InfoLevel) {
+ ec = " (exit code " + strconv.Itoa(e.exitCode) + ")"
+ }
+ msg := "netavark" + ec
+ if len(e.Msg) > 0 {
+ msg += ": " + e.Msg
+ }
+ if e.err != nil {
+ msg += ": " + e.err.Error()
+ }
+ return msg
+}
+
+func (e *netavarkError) Unwrap() error {
+ return e.err
+}
+
+func newNetavarkError(msg string, err error) error {
+ return &netavarkError{
+ Msg: msg,
+ err: err,
+ }
+}
+
+// Type to implement the io.Writer interface
+// This will write to logrus at info level
+type logrusNetavarkWriter struct{}
+
+func (l *logrusNetavarkWriter) Write(b []byte) (int, error) {
+ logrus.Info("netavark: ", string(b))
+ return len(b), nil
+}
+
+// getRustLogEnv returns the RUST_LOG env var based on the current logrus level
+func getRustLogEnv() string {
+ level := logrus.GetLevel().String()
+ // rust env_log
uses warn instead of warning + if level == "warning" { + level = "warn" + } + // the rust netlink library is very verbose + // make sure to only log netavark logs + return "RUST_LOG=netavark=" + level +} + +// execNetavark will execute netavark with the following arguments +// It takes the path to the binary, the list of args and an interface which is +// marshaled to json and send via stdin to netavark. The result interface is +// used to marshal the netavark output into it. This can be nil. +// All errors return by this function should be of the type netavarkError +// to provide a helpful error message. +func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{}) error { + stdinR, stdinW, err := os.Pipe() + if err != nil { + return newNetavarkError("failed to create stdin pipe", err) + } + stdinWClosed := false + defer func() { + stdinR.Close() + if !stdinWClosed { + stdinW.Close() + } + }() + + stdoutR, stdoutW, err := os.Pipe() + if err != nil { + return newNetavarkError("failed to create stdout pipe", err) + } + stdoutWClosed := false + defer func() { + stdoutR.Close() + if !stdoutWClosed { + stdoutW.Close() + } + }() + + // connect stderr to the podman stderr for logging + var logWriter io.Writer = os.Stderr + if n.syslog { + // connect logrus to stderr as well so that the logs will be written to the syslog as well + logWriter = io.MultiWriter(logWriter, &logrusNetavarkWriter{}) + } + + cmd := exec.Command(n.netavarkBinary, append(n.getCommonNetavarkOptions(), args...)...) + // connect the pipes to stdin and stdout + cmd.Stdin = stdinR + cmd.Stdout = stdoutW + cmd.Stderr = logWriter + // set the netavark log level to the same as the podman + cmd.Env = append(os.Environ(), getRustLogEnv()) + // if we run with debug log level lets also set RUST_BACKTRACE=1 so we can get the full stack trace in case of panics + if logrus.IsLevelEnabled(logrus.DebugLevel) { + cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1") + } + + err = cmd.Start() + if err != nil { + return newNetavarkError("failed to start process", err) + } + err = json.NewEncoder(stdinW).Encode(stdin) + // we have to close stdinW so netavark gets the EOF and does not hang forever + stdinW.Close() + stdinWClosed = true + if err != nil { + return newNetavarkError("failed to encode stdin data", err) + } + + dec := json.NewDecoder(stdoutR) + + err = cmd.Wait() + // we have to close stdoutW so we can decode the json without hanging forever + stdoutW.Close() + stdoutWClosed = true + if err != nil { + exitError := &exec.ExitError{} + if errors.As(err, &exitError) { + ne := &netavarkError{} + // lets disallow unknown fields to make sure we do not get some unexpected stuff + dec.DisallowUnknownFields() + // this will unmarshal the error message into the error struct + ne.err = dec.Decode(ne) + ne.exitCode = exitError.ExitCode() + return ne + } + return newNetavarkError("unexpected failure during execution", err) + } + + if result != nil { + err = dec.Decode(result) + if err != nil { + return newNetavarkError("failed to decode result", err) + } + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go new file mode 100644 index 00000000000..f99d099cad3 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -0,0 +1,372 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/containers/common/libnetwork/types" + 
"github.com/containers/common/libnetwork/util" + "github.com/pkg/errors" + "go.etcd.io/bbolt" +) + +// IPAM boltdb structure +// Each network has their own bucket with the network name as bucket key. +// Inside the network bucket there is an ID bucket which maps the container ID (key) +// to a json array of ip addresses (value). +// The network bucket also has a bucket for each subnet, the subnet is used as key. +// Inside the subnet bucket an ip is used as key and the container ID as value. + +const ( + idBucket = "ids" + // lastIP this is used as key to store the last allocated ip + // note that this string should not be 4 or 16 byte long + lastIP = "lastIP" +) + +var ( + idBucketKey = []byte(idBucket) + lastIPKey = []byte(lastIP) +) + +type ipamError struct { + msg string + cause error +} + +func (e *ipamError) Error() string { + msg := "IPAM error" + if e.msg != "" { + msg += ": " + e.msg + } + if e.cause != nil { + msg += ": " + e.cause.Error() + } + return msg +} + +func newIPAMError(cause error, msg string, args ...interface{}) *ipamError { + return &ipamError{ + msg: fmt.Sprintf(msg, args...), + cause: cause, + } +} + +// openDB will open the ipam database +// Note that the caller has to Close it. +func (n *netavarkNetwork) openDB() (*bbolt.DB, error) { + // linter complains about the octal value + // nolint:gocritic + db, err := bbolt.Open(n.ipamDBPath, 0600, nil) + if err != nil { + return nil, newIPAMError(err, "failed to open database %s", n.ipamDBPath) + } + return db, nil +} + +// allocIPs will allocate ips for the the container. It will change the +// NetworkOptions in place. When static ips are given it will validate +// that these are free to use and will allocate them to the container. +func (n *netavarkNetwork) allocIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.Update(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + + // create/get network bucket + netBkt, err := tx.CreateBucketIfNotExists([]byte(netName)) + if err != nil { + return newIPAMError(err, "failed to create/get network bucket for network %s", netName) + } + + // requestIPs is the list of ips which should be used for this container + requestIPs := make([]net.IP, 0, len(network.Subnets)) + + for i := range network.Subnets { + subnetBkt, err := netBkt.CreateBucketIfNotExists([]byte(network.Subnets[i].Subnet.String())) + if err != nil { + return newIPAMError(err, "failed to create/get subnet bucket for network %s", netName) + } + + // search for a static ip which matches the current subnet + // in this case the user wants this one and we should not assign a free one + var ip net.IP + for _, staticIP := range netOpts.StaticIPs { + if network.Subnets[i].Subnet.Contains(staticIP) { + ip = staticIP + break + } + } + + // when static ip is requested for this network + if ip != nil { + // convert to 4 byte ipv4 if needed + util.NormalizeIP(&ip) + id := subnetBkt.Get(ip) + if id != nil { + return newIPAMError(nil, "requested ip address %s is already allocated to container ID %s", ip.String(), string(id)) + } + } else { + ip, err = getFreeIPFromBucket(subnetBkt, &network.Subnets[i]) + if err != nil { + return err + } + err = subnetBkt.Put(lastIPKey, ip) + if err != nil { + return newIPAMError(err, 
"failed to store last ip in database") + } + } + + err = subnetBkt.Put(ip, []byte(opts.ContainerID)) + if err != nil { + return newIPAMError(err, "failed to store ip in database") + } + + requestIPs = append(requestIPs, ip) + } + + idsBucket, err := netBkt.CreateBucketIfNotExists(idBucketKey) + if err != nil { + return newIPAMError(err, "failed to create/get id bucket for network %s", netName) + } + + ipsBytes, err := json.Marshal(requestIPs) + if err != nil { + return newIPAMError(err, "failed to marshal ips") + } + + err = idsBucket.Put([]byte(opts.ContainerID), ipsBytes) + if err != nil { + return newIPAMError(err, "failed to store ips in database") + } + + netOpts.StaticIPs = requestIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +func getFreeIPFromBucket(bucket *bbolt.Bucket, subnet *types.Subnet) (net.IP, error) { + var rangeStart net.IP + var rangeEnd net.IP + if subnet.LeaseRange != nil { + rangeStart = subnet.LeaseRange.StartIP + rangeEnd = subnet.LeaseRange.EndIP + } + + if rangeStart == nil { + // let start with the first ip in subnet + rangeStart = util.NextIP(subnet.Subnet.IP) + } + + if rangeEnd == nil { + lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet) + // this error should never happen but lets check anyways to prevent panics + if err != nil { + return nil, errors.Wrap(err, "failed to get lastIP") + } + // ipv4 uses the last ip in a subnet for broadcast so we cannot use it + if util.IsIPv4(lastIP) { + lastIP = util.PrevIP(lastIP) + } + rangeEnd = lastIP + } + + lastIPByte := bucket.Get(lastIPKey) + curIP := net.IP(lastIPByte) + if curIP == nil { + curIP = rangeStart + } else { + curIP = util.NextIP(curIP) + } + + // store the start ip to make sure we know when we looped over all available ips + startIP := curIP + + for { + // skip the gateway + if subnet.Gateway != nil { + if util.Cmp(curIP, subnet.Gateway) == 0 { + curIP = util.NextIP(curIP) + continue + } + } + + // if we are at the end we need to jump back to the start ip + if util.Cmp(curIP, rangeEnd) > 0 { + if util.Cmp(rangeStart, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + curIP = rangeStart + continue + } + + // check if ip is already used by another container + // if not return it + if bucket.Get(curIP) == nil { + return curIP, nil + } + + curIP = util.NextIP(curIP) + + if util.Cmp(curIP, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + } +} + +// getAssignedIPs will read the ipam database and will fill in the used ips for this container. +// It will change the NetworkOptions in place. 
+func (n *netavarkNetwork) getAssignedIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.View(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + ipJSON := idBkt.Get([]byte(opts.ContainerID)) + if ipJSON == nil { + return newIPAMError(nil, "failed to get ips for container ID %s on network %s", opts.ContainerID, netName) + } + + // assignedIPs is the list of ips which should be used for this container + assignedIPs := make([]net.IP, 0, len(network.Subnets)) + + err = json.Unmarshal(ipJSON, &assignedIPs) + if err != nil { + return newIPAMError(err, "failed to unmarshal ips from database") + } + + for i := range assignedIPs { + util.NormalizeIP(&assignedIPs[i]) + } + + netOpts.StaticIPs = assignedIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +// deallocIPs will release the ips in the network options from the DB so that +// they can be reused by other containers. It expects that the network options +// are already filled with the correct ips. Use getAssignedIPs() for this. +func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.Update(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + for _, subnet := range network.Subnets { + subnetBkt := netBkt.Bucket([]byte(subnet.Subnet.String())) + if subnetBkt == nil { + return newIPAMError(nil, "failed to get subnet bucket for network %s", netName) + } + + // search for a static ip which matches the current subnet + // in this case the user wants this one and we should not assign a free one + var ip net.IP + for _, staticIP := range netOpts.StaticIPs { + if subnet.Subnet.Contains(staticIP) { + ip = staticIP + break + } + } + if ip == nil { + return newIPAMError(nil, "failed to find ip for subnet %s on network %s", subnet.Subnet.String(), netName) + } + util.NormalizeIP(&ip) + + err = subnetBkt.Delete(ip) + if err != nil { + return newIPAMError(err, "failed to remove ip %s from subnet bucket for network %s", ip.String(), netName) + } + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + err = idBkt.Delete([]byte(opts.ContainerID)) + if err != nil { + return newIPAMError(err, "failed to remove allocated ips for container ID %s on network %s", opts.ContainerID, netName) + } + } + return nil + }) + return err +} + +// requiresIPAMAlloc return true when we have to allocate ips for this network +// it 
checks the ipam driver and if subnets are set +func requiresIPAMAlloc(network *types.Network) bool { + // only do host allocation when driver is set to HostLocalIPAMDriver or unset + switch network.IPAMOptions["driver"] { + case "", types.HostLocalIPAMDriver: + default: + return false + } + + // no subnets == no ips to assign + return len(network.Subnets) > 0 +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go new file mode 100644 index 00000000000..efea36fec2e --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -0,0 +1,309 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type netavarkNetwork struct { + // networkConfigDir is directory where the network config files are stored. + networkConfigDir string + + // networkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config etc + networkRunDir string + + // tells netavark whether this is rootless mode or rootfull, "true" or "false" + networkRootless bool + + // netavarkBinary is the path to the netavark binary. + netavarkBinary string + // aardvarkBinary is the path to the aardvark binary. + aardvarkBinary string + + // defaultNetwork is the name for the default network. + defaultNetwork string + // defaultSubnet is the default subnet for the default network. + defaultSubnet types.IPNet + + // ipamDBPath is the path to the ip allocation bolt db + ipamDBPath string + + // syslog describes whenever the netavark debbug output should be log to the syslog as well. + // This will use logrus to do so, make sure logrus is set up to log to the syslog. + syslog bool + + // lock is a internal lock for critical operations + lock lockfile.Locker + + // modTime is the timestamp when the config dir was modified + modTime time.Time + + // networks is a map with loaded networks, the key is the network name + networks map[string]*types.Network +} + +type InitConfig struct { + // NetworkConfigDir is directory where the network config files are stored. + NetworkConfigDir string + + // NetavarkBinary is the path to the netavark binary. + NetavarkBinary string + // AardvarkBinary is the path to the aardvark binary. + AardvarkBinary string + + // NetworkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config + NetworkRunDir string + + // DefaultNetwork is the name for the default network. + DefaultNetwork string + // DefaultSubnet is the default subnet for the default network. + DefaultSubnet string + + // Syslog describes whenever the netavark debbug output should be log to the syslog as well. + // This will use logrus to do so, make sure logrus is set up to log to the syslog. + Syslog bool +} + +// NewNetworkInterface creates the ContainerNetwork interface for the netavark backend. +// Note: The networks are not loaded from disk until a method is called. 
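Editor's note: a minimal sketch of wiring up this constructor directly (linux only). The binary and directory paths are assumptions for illustration; callers normally resolve them from containers.conf via FindHelperBinary, as the interface.go file further below shows:

```go
//go:build linux

package main

import (
	"fmt"

	"github.com/containers/common/libnetwork/netavark"
)

func main() {
	// Paths are illustrative placeholders, not the library defaults.
	n, err := netavark.NewNetworkInterface(&netavark.InitConfig{
		NetworkConfigDir: "/etc/containers/networks",
		NetworkRunDir:    "/run/containers/networks",
		NetavarkBinary:   "/usr/libexec/podman/netavark",
		AardvarkBinary:   "/usr/libexec/podman/aardvark-dns",
	})
	if err != nil {
		fmt.Println("init failed:", err)
		return
	}
	// Networks are loaded lazily, so this is the first disk access.
	fmt.Println("default network:", n.DefaultNetworkName())
	fmt.Println("drivers:", n.Drivers())
}
```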
+func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { + // TODO: consider using a shared memory lock + lock, err := lockfile.GetLockfile(filepath.Join(conf.NetworkConfigDir, "netavark.lock")) + if err != nil { + return nil, err + } + + defaultNetworkName := conf.DefaultNetwork + if defaultNetworkName == "" { + defaultNetworkName = types.DefaultNetworkName + } + + defaultSubnet := conf.DefaultSubnet + if defaultSubnet == "" { + defaultSubnet = types.DefaultSubnet + } + defaultNet, err := types.ParseCIDR(defaultSubnet) + if err != nil { + return nil, errors.Wrap(err, "failed to parse default subnet") + } + + if err := os.MkdirAll(conf.NetworkConfigDir, 0755); err != nil { + return nil, err + } + + if err := os.MkdirAll(conf.NetworkRunDir, 0755); err != nil { + return nil, err + } + + n := &netavarkNetwork{ + networkConfigDir: conf.NetworkConfigDir, + networkRunDir: conf.NetworkRunDir, + netavarkBinary: conf.NetavarkBinary, + aardvarkBinary: conf.AardvarkBinary, + networkRootless: unshare.IsRootless(), + ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), + defaultNetwork: defaultNetworkName, + defaultSubnet: defaultNet, + lock: lock, + syslog: conf.Syslog, + } + + return n, nil +} + +// Drivers will return the list of supported network drivers +// for this interface. +func (n *netavarkNetwork) Drivers() []string { + return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver} +} + +// DefaultNetworkName will return the default netavark network name. +func (n *netavarkNetwork) DefaultNetworkName() string { + return n.defaultNetwork +} + +func (n *netavarkNetwork) loadNetworks() error { + // check the mod time of the config dir + f, err := os.Stat(n.networkConfigDir) + if err != nil { + return err + } + modTime := f.ModTime() + + // skip loading networks if they are already loaded and + // if the config dir was not modified since the last call + if n.networks != nil && modTime.Equal(n.modTime) { + return nil + } + // make sure the remove all networks before we reload them + n.networks = nil + n.modTime = modTime + + files, err := ioutil.ReadDir(n.networkConfigDir) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + + networks := make(map[string]*types.Network, len(files)) + for _, f := range files { + if f.IsDir() { + continue + } + if filepath.Ext(f.Name()) != ".json" { + continue + } + + path := filepath.Join(n.networkConfigDir, f.Name()) + file, err := os.Open(path) + if err != nil { + // do not log ENOENT errors + if !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Error loading network config file %q: %v", path, err) + } + continue + } + network := new(types.Network) + err = json.NewDecoder(file).Decode(network) + if err != nil { + logrus.Warnf("Error reading network config file %q: %v", path, err) + continue + } + + // check that the filename matches the network name + if network.Name+".json" != f.Name() { + logrus.Warnf("Network config name %q does not match file name %q, skipping", network.Name, f.Name()) + continue + } + + if !types.NameRegex.MatchString(network.Name) { + logrus.Warnf("Network config %q has invalid name: %q, skipping: %v", path, network.Name, types.RegexError) + continue + } + + err = parseNetwork(network) + if err != nil { + logrus.Warnf("Network config %q could not be parsed, skipping: %v", path, err) + continue + } + + logrus.Debugf("Successfully loaded network %s: %v", network.Name, network) + networks[network.Name] = network + } + + // create the default network in memory if it did not exists on disk + 
if networks[n.defaultNetwork] == nil { + networkInfo, err := n.createDefaultNetwork() + if err != nil { + return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork) + } + networks[n.defaultNetwork] = networkInfo + } + logrus.Debugf("Successfully loaded %d networks", len(networks)) + n.networks = networks + return nil +} + +func parseNetwork(network *types.Network) error { + if network.Labels == nil { + network.Labels = map[string]string{} + } + if network.Options == nil { + network.Options = map[string]string{} + } + if network.IPAMOptions == nil { + network.IPAMOptions = map[string]string{} + } + + if len(network.ID) != 64 { + return errors.Errorf("invalid network ID %q", network.ID) + } + + // add gatway when not internal or dns enabled + addGateway := !network.Internal || network.DNSEnabled + return util.ValidateSubnets(network, addGateway, nil) +} + +func (n *netavarkNetwork) createDefaultNetwork() (*types.Network, error) { + net := types.Network{ + Name: n.defaultNetwork, + NetworkInterface: defaultBridgeName + "0", + // Important do not change this ID + ID: "2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9", + Driver: types.BridgeNetworkDriver, + Subnets: []types.Subnet{ + {Subnet: n.defaultSubnet}, + }, + } + return n.networkCreate(&net, true) +} + +// getNetwork will lookup a network by name or ID. It returns an +// error when no network was found or when more than one network +// with the given (partial) ID exists. +// getNetwork will read from the networks map, therefore the caller +// must ensure that n.lock is locked before using it. +func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) { + // fast path check the map key, this will only work for names + if val, ok := n.networks[nameOrID]; ok { + return val, nil + } + // If there was no match we might got a full or partial ID. + var net *types.Network + for _, val := range n.networks { + // This should not happen because we already looked up the map by name but check anyway. + if val.Name == nameOrID { + return val, nil + } + + if strings.HasPrefix(val.ID, nameOrID) { + if net != nil { + return nil, errors.Errorf("more than one result for network ID %s", nameOrID) + } + net = val + } + } + if net != nil { + return net, nil + } + return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) +} + +// Implement the NetUtil interface for easy code sharing with other network interfaces. + +// ForEach call the given function for each network +func (n *netavarkNetwork) ForEach(run func(types.Network)) { + for _, val := range n.networks { + run(*val) + } +} + +// Len return the number of networks +func (n *netavarkNetwork) Len() int { + return len(n.networks) +} + +// DefaultInterfaceName return the default cni bridge name, must be suffixed with a number. 
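Editor's note: getNetwork's name-or-ID resolution (exact name first, then a unique ID prefix, ambiguity is an error) can be sketched independently of the struct; the map contents here are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// lookup mirrors getNetwork above over a plain name -> ID map.
func lookup(networks map[string]string, nameOrID string) (string, error) {
	// fast path: exact name match
	if _, ok := networks[nameOrID]; ok {
		return nameOrID, nil
	}
	var match string
	for name, id := range networks {
		if strings.HasPrefix(id, nameOrID) {
			if match != "" {
				return "", fmt.Errorf("more than one result for network ID %s", nameOrID)
			}
			match = name
		}
	}
	if match == "" {
		return "", fmt.Errorf("unable to find network with name or ID %s", nameOrID)
	}
	return match, nil
}

func main() {
	nets := map[string]string{
		"podman": "2f259bab93aa",
		"mynet":  "9a8c7b6d5e4f",
	}
	fmt.Println(lookup(nets, "9a8c"))   // mynet <nil>
	fmt.Println(lookup(nets, "podman")) // podman <nil>
}
```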
+func (n *netavarkNetwork) DefaultInterfaceName() string { + return defaultBridgeName +} + +func (n *netavarkNetwork) Network(nameOrID string) (*types.Network, error) { + network, err := n.getNetwork(nameOrID) + if err != nil { + return nil, err + } + return network, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go new file mode 100644 index 00000000000..0a9dc370422 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -0,0 +1,132 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type netavarkOptions struct { + types.NetworkOptions + Networks map[string]*types.Network `json:"network_info"` +} + +// Setup will setup the container network namespace. It returns +// a map of StatusBlocks, the key is the network name. +func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + err = util.ValidateSetupOptions(n, namespacePath, options) + if err != nil { + return nil, err + } + + // allocate IPs in the IPAM db + err = n.allocIPs(&options.NetworkOptions) + if err != nil { + return nil, err + } + + netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) + if err != nil { + return nil, errors.Wrap(err, "failed to convert net opts") + } + + // trace output to get the json + if logrus.IsLevelEnabled(logrus.TraceLevel) { + b, err := json.Marshal(&netavarkOpts) + if err != nil { + return nil, err + } + // show the full netavark command so we can easily reproduce errors from the cli + logrus.Tracef("netavark command: printf '%s' | %s setup %s", string(b), n.netavarkBinary, namespacePath) + } + + result := map[string]types.StatusBlock{} + err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result) + if err != nil { + // lets dealloc ips to prevent leaking + if err := n.deallocIPs(&options.NetworkOptions); err != nil { + logrus.Error(err) + } + return nil, err + } + + // make sure that the result makes sense + if len(result) != len(options.Networks) { + logrus.Errorf("unexpected netavark result: %v", result) + return nil, fmt.Errorf("unexpected netavark result length, want (%d), got (%d) networks", len(options.Networks), len(result)) + } + + return result, err +} + +// Teardown will teardown the container network namespace. 
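Editor's note: to see what Setup actually pipes to the netavark binary, here is a sketch that marshals a SetupOptions payload using the types this patch vendors; the container ID, name, and network entries are placeholders:

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/containers/common/libnetwork/types"
)

func main() {
	// Placeholder values; real callers fill these from the container.
	opts := types.SetupOptions{
		NetworkOptions: types.NetworkOptions{
			ContainerID:   "0123456789ab",
			ContainerName: "demo",
			Networks: map[string]types.PerNetworkOptions{
				"podman": {InterfaceName: "eth0"},
			},
		},
	}
	// convertNetOpts additionally attaches the full network configs
	// under "network_info" before the JSON is written to stdin.
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(opts)
}
```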
+func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownOptions) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + // get IPs from the IPAM db + err = n.getAssignedIPs(&options.NetworkOptions) + if err != nil { + // when there is an error getting the ips we should still continue + // to call teardown for netavark to prevent leaking network interfaces + logrus.Error(err) + } + + netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) + if err != nil { + return errors.Wrap(err, "failed to convert net opts") + } + + retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil) + + // when netavark returned an error we still free the used ips + // otherwise we could end up in a state where block the ips forever + err = n.deallocIPs(&netavarkOpts.NetworkOptions) + if err != nil { + if retErr != nil { + logrus.Error(err) + } else { + retErr = err + } + } + + return retErr +} + +func (n *netavarkNetwork) getCommonNetavarkOptions() []string { + return []string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "--aardvark-binary=" + n.aardvarkBinary} +} + +func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOptions, error) { + netavarkOptions := netavarkOptions{ + NetworkOptions: opts, + Networks: make(map[string]*types.Network, len(opts.Networks)), + } + + for network := range opts.Networks { + net, err := n.getNetwork(network) + if err != nil { + return nil, err + } + netavarkOptions.Networks[network] = net + } + return &netavarkOptions, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go new file mode 100644 index 00000000000..cd4fd89f159 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -0,0 +1,203 @@ +// +build linux + +package network + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/cni" + "github.com/containers/common/libnetwork/netavark" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" +) + +const ( + // defaultNetworkBackendFileName is the file name for sentinel file to store the backend + defaultNetworkBackendFileName = "defaultNetworkBackend" + // cniConfigDir is the directory where cni configuration is found + cniConfigDir = "/etc/cni/net.d/" + // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins + cniConfigDirRootless = "cni/net.d/" + // netavarkConfigDir is the config directory for the rootful network files + netavarkConfigDir = "/etc/containers/networks" + // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db + netavarkRunDir = "/run/containers/networks" + + // netavarkBinary is the name of the netavark binary + netavarkBinary = "netavark" + // aardvarkBinary is the name of the aardvark binary + aardvarkBinary = "aardvark-dns" +) + +// NetworkBackend returns the network backend name and interface +// It returns either the CNI or netavark backend depending on what is set in the config. 
+// If the the backend is set to "" we will automatically assign the backend on the following conditions: +// 1. read ${graphroot}/defaultNetworkBackend +// 2. find netavark binary (if not installed use CNI) +// 3. check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI +func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { + backend := types.NetworkBackend(conf.Network.NetworkBackend) + if backend == "" { + var err error + backend, err = defaultNetworkBackend(store, conf) + if err != nil { + return "", nil, fmt.Errorf("failed to get default network backend: %w", err) + } + } + + switch backend { + case types.Netavark: + netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false) + if err != nil { + return "", nil, err + } + + aardvarkBin, err := conf.FindHelperBinary(aardvarkBinary, false) + if err != nil { + // this is not a fatal error we can still use netavark without dns + logrus.Warnf("%s binary not found, container dns will not be enabled", aardvarkBin) + } + + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + confDir = getDefaultNetavarkConfigDir(store) + } + + // We cannot use the runroot for rootful since the network namespace is shared for all + // libpod instances they also have to share the same ipam db. + // For rootless we have our own network namespace per libpod instances, + // so this is not a problem there. + runDir := netavarkRunDir + if unshare.IsRootless() { + runDir = filepath.Join(store.RunRoot(), "networks") + } + + netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ + NetworkConfigDir: confDir, + NetworkRunDir: runDir, + NetavarkBinary: netavarkBin, + AardvarkBinary: aardvarkBin, + DefaultNetwork: conf.Network.DefaultNetwork, + DefaultSubnet: conf.Network.DefaultSubnet, + Syslog: syslog, + }) + return types.Netavark, netInt, err + case types.CNI: + netInt, err := getCniInterface(conf) + return types.CNI, netInt, err + + default: + return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend) + } +} + +func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) { + // read defaultNetworkBackend file + file := filepath.Join(store.GraphRoot(), defaultNetworkBackendFileName) + b, err := ioutil.ReadFile(file) + if err == nil { + val := string(b) + if val == string(types.Netavark) { + return types.Netavark, nil + } + if val == string(types.CNI) { + return types.CNI, nil + } + return "", fmt.Errorf("unknown network backend value %q in %q", val, file) + } + // fail for all errors except ENOENT + if !errors.Is(err, os.ErrNotExist) { + return "", fmt.Errorf("could not read network backend value: %w", err) + } + + // cache the network backend to make sure always the same one will be used + defer func() { + // only write when there is no error + if err == nil { + // nolint:gocritic + if err := ioutils.AtomicWriteFile(file, []byte(backend), 0644); err != nil { + logrus.Errorf("could not write network backend to file: %v", err) + } + } + }() + + _, err = conf.FindHelperBinary("netavark", false) + if err != nil { + // if we cannot find netavark use CNI + return types.CNI, nil + } + + // now check if there are already containers, images and CNI networks (new install?) 
+ cons, err := store.Containers()
+ if err != nil {
+ return "", err
+ }
+ if len(cons) == 0 {
+ imgs, err := store.Images()
+ if err != nil {
+ return "", err
+ }
+ if len(imgs) == 0 {
+ cniInterface, err := getCniInterface(conf)
+ if err == nil {
+ nets, err := cniInterface.NetworkList()
+ // there is always a default network so check <= 1
+ if err == nil && len(nets) <= 1 {
+ // we have a fresh system so use netavark
+ return types.Netavark, nil
+ }
+ }
+ }
+ }
+ return types.CNI, nil
+}
+
+func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) {
+ confDir := conf.Network.NetworkConfigDir
+ if confDir == "" {
+ var err error
+ confDir, err = getDefaultCNIConfigDir()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return cni.NewCNINetworkInterface(&cni.InitConfig{
+ CNIConfigDir: confDir,
+ CNIPluginDirs: conf.Network.CNIPluginDirs,
+ DefaultNetwork: conf.Network.DefaultNetwork,
+ DefaultSubnet: conf.Network.DefaultSubnet,
+ IsMachine: conf.Engine.MachineEnabled,
+ })
+}
+
+func getDefaultCNIConfigDir() (string, error) {
+ if !unshare.IsRootless() {
+ return cniConfigDir, nil
+ }
+
+ configHome, err := homedir.GetConfigHome()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(configHome, cniConfigDirRootless), nil
+}
+
+// getDefaultNetavarkConfigDir returns the netavark config dir. For rootful it will
+// use "/etc/containers/networks" and for rootless "$graphroot/networks". We cannot
+// use the graphroot for rootful since the network namespace is shared for all
+// libpod instances.
+func getDefaultNetavarkConfigDir(store storage.Store) string {
+ if !unshare.IsRootless() {
+ return netavarkConfigDir
+ }
+ return filepath.Join(store.GraphRoot(), "networks")
+}
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
new file mode 100644
index 00000000000..b2d4a4538ec
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -0,0 +1,47 @@
+package types
+
+const (
+ // BridgeNetworkDriver defines the bridge driver
+ BridgeNetworkDriver = "bridge"
+ // DefaultNetworkDriver is the default network type used
+ DefaultNetworkDriver = BridgeNetworkDriver
+ // MacVLANNetworkDriver defines the macvlan driver
+ MacVLANNetworkDriver = "macvlan"
+ // IPVLANNetworkDriver defines the ipvlan driver
+ IPVLANNetworkDriver = "ipvlan"
+
+ // IPAM drivers
+ // HostLocalIPAMDriver stores the ip
+ HostLocalIPAMDriver = "host-local"
+ // DHCPIPAMDriver gets subnet and ip from a dhcp server
+ DHCPIPAMDriver = "dhcp"
+
+ // DefaultNetworkName is the name that will be used for the default CNI network.
+ DefaultNetworkName = "podman"
+ // DefaultSubnet is the subnet that will be used for the default CNI network.
+ DefaultSubnet = "10.88.0.0/16" + + // valid macvlan driver mode values + MacVLANModeBridge = "bridge" + MacVLANModePrivate = "private" + MacVLANModeVepa = "vepa" + MacVLANModePassthru = "passthru" + + // valid ipvlan driver modes + IPVLANModeL2 = "l2" + IPVLANModeL3 = "l3" + IPVLANModeL3s = "l3s" +) + +type NetworkBackend string + +const ( + CNI NetworkBackend = "cni" + Netavark NetworkBackend = "netavark" +) + +// ValidMacVLANModes is the list of valid mode options for the macvlan driver +var ValidMacVLANModes = []string{MacVLANModeBridge, MacVLANModePrivate, MacVLANModeVepa, MacVLANModePassthru} + +// ValidIPVLANModes is the list of valid mode options for the ipvlan driver +var ValidIPVLANModes = []string{IPVLANModeL2, IPVLANModeL3, IPVLANModeL3s} diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go new file mode 100644 index 00000000000..d37e529dfe6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -0,0 +1,25 @@ +package types + +import ( + "regexp" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchNetwork indicates the requested network does not exist + ErrNoSuchNetwork = errors.New("network not found") + + // ErrInvalidArg indicates that an invalid argument was passed + ErrInvalidArg = errors.New("invalid argument") + + // ErrNetworkExists indicates that a network with the given name already + // exists. + ErrNetworkExists = errors.New("network already exists") + + // NameRegex is a regular expression to validate names. + // This must NOT be changed. + NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + // RegexError is thrown in presence of an invalid name. + RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") +) diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go new file mode 100644 index 00000000000..de865537738 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -0,0 +1,282 @@ +package types + +import ( + "encoding/json" + "net" + "time" +) + +type ContainerNetwork interface { + // NetworkCreate will take a partial filled Network and fill the + // missing fields. It creates the Network and returns the full Network. + NetworkCreate(Network) (Network, error) + // NetworkRemove will remove the Network with the given name or ID. + NetworkRemove(nameOrID string) error + // NetworkList will return all known Networks. Optionally you can + // supply a list of filter functions. Only if a network matches all + // functions it is returned. + NetworkList(...FilterFunc) ([]Network, error) + // NetworkInspect will return the Network with the given name or ID. + NetworkInspect(nameOrID string) (Network, error) + + // Setup will setup the container network namespace. It returns + // a map of StatusBlocks, the key is the network name. + Setup(namespacePath string, options SetupOptions) (map[string]StatusBlock, error) + // Teardown will teardown the container network namespace. + Teardown(namespacePath string, options TeardownOptions) error + + // Drivers will return the list of supported network drivers + // for this interface. + Drivers() []string + + // DefaultNetworkName will return the default network name + // for this interface. + DefaultNetworkName() string +} + +// Network describes the Network attributes. +type Network struct { + // Name of the Network. 
+ Name string `json:"name"` + // ID of the Network. + ID string `json:"id"` + // Driver for this Network, e.g. bridge, macvlan... + Driver string `json:"driver"` + // NetworkInterface is the network interface name on the host. + NetworkInterface string `json:"network_interface,omitempty"` + // Created contains the timestamp when this network was created. + Created time.Time `json:"created,omitempty"` + // Subnets to use for this network. + Subnets []Subnet `json:"subnets,omitempty"` + // IPv6Enabled if set to true an ipv6 subnet should be created for this net. + IPv6Enabled bool `json:"ipv6_enabled"` + // Internal is whether the Network should not have external routes + // to public or other Networks. + Internal bool `json:"internal"` + // DNSEnabled is whether name resolution is active for container on + // this Network. + DNSEnabled bool `json:"dns_enabled"` + // Labels is a set of key-value labels that have been applied to the + // Network. + Labels map[string]string `json:"labels,omitempty"` + // Options is a set of key-value options that have been applied to + // the Network. + Options map[string]string `json:"options,omitempty"` + // IPAMOptions contains options used for the ip assignment. + IPAMOptions map[string]string `json:"ipam_options,omitempty"` +} + +// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods. +type IPNet struct { + net.IPNet +} + +// ParseCIDR parse a string to IPNet +func ParseCIDR(cidr string) (IPNet, error) { + ip, subnet, err := net.ParseCIDR(cidr) + if err != nil { + return IPNet{}, err + } + // convert to 4 bytes if ipv4 + ipv4 := ip.To4() + if ipv4 != nil { + ip = ipv4 + } + subnet.IP = ip + return IPNet{*subnet}, err +} + +func (n *IPNet) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +func (n *IPNet) UnmarshalText(text []byte) error { + subnet, err := ParseCIDR(string(text)) + if err != nil { + return err + } + *n = subnet + return nil +} + +// HardwareAddr is the same as net.HardwareAddr except +// that it adds the json marshal/unmarshal methods. +// This allows us to read the mac from a json string +// and a byte array. +// swagger:model MacAddress +type HardwareAddr net.HardwareAddr + +func (h *HardwareAddr) String() string { + return (*net.HardwareAddr)(h).String() +} + +func (h HardwareAddr) MarshalText() ([]byte, error) { + return []byte(h.String()), nil +} + +func (h *HardwareAddr) UnmarshalJSON(text []byte) error { + if len(text) == 0 { + *h = nil + return nil + } + + // if the json string start with a quote we got a string + // unmarshal the string and parse the mac from this string + if string(text[0]) == `"` { + var macString string + err := json.Unmarshal(text, &macString) + if err == nil { + mac, err := net.ParseMAC(macString) + if err == nil { + *h = HardwareAddr(mac) + return nil + } + } + } + // not a string or got an error fallback to the normal parsing + mac := make(net.HardwareAddr, 0, 6) + // use the standard json unmarshal for backwards compat + err := json.Unmarshal(text, &mac) + if err != nil { + return err + } + *h = HardwareAddr(mac) + return nil +} + +type Subnet struct { + // Subnet for this Network in CIDR form. + // swagger:strfmt string + Subnet IPNet `json:"subnet"` + // Gateway IP for this Network. + // swagger:strfmt string + Gateway net.IP `json:"gateway,omitempty"` + // LeaseRange contains the range where IP are leased. Optional. + LeaseRange *LeaseRange `json:"lease_range,omitempty"` +} + +// LeaseRange contains the range where IP are leased. 
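Editor's note: a round-trip sketch of the custom IPNet text encoding defined above. Note that types.ParseCIDR deliberately keeps the given ip (converted to 4 bytes for ipv4) rather than reducing it to the network address:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/common/libnetwork/types"
)

func main() {
	subnet, err := types.ParseCIDR("10.89.1.15/24")
	if err != nil {
		panic(err)
	}
	// unlike net.ParseCIDR's second return value, the given ip survives
	fmt.Println(subnet.String()) // 10.89.1.15/24

	// MarshalText makes the JSON form a plain CIDR string
	b, _ := json.Marshal(&subnet)
	fmt.Println(string(b)) // "10.89.1.15/24"

	var decoded types.IPNet
	_ = json.Unmarshal(b, &decoded)
	fmt.Println(decoded.String()) // 10.89.1.15/24
}
```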
+type LeaseRange struct { + // StartIP first IP in the subnet which should be used to assign ips. + // swagger:strfmt string + StartIP net.IP `json:"start_ip,omitempty"` + // EndIP last IP in the subnet which should be used to assign ips. + // swagger:strfmt string + EndIP net.IP `json:"end_ip,omitempty"` +} + +// StatusBlock contains the network information about a container +// connected to one Network. +type StatusBlock struct { + // Interfaces contains the created network interface in the container. + // The map key is the interface name. + Interfaces map[string]NetInterface `json:"interfaces,omitempty"` + // DNSServerIPs nameserver addresses which should be added to + // the containers resolv.conf file. + DNSServerIPs []net.IP `json:"dns_server_ips,omitempty"` + // DNSSearchDomains search domains which should be added to + // the containers resolv.conf file. + DNSSearchDomains []string `json:"dns_search_domains,omitempty"` +} + +// NetInterface contains the settings for a given network interface. +type NetInterface struct { + // Subnets list of assigned subnets with their gateway. + Subnets []NetAddress `json:"subnets,omitempty"` + // MacAddress for this Interface. + MacAddress HardwareAddr `json:"mac_address"` +} + +// NetAddress contains the ip address, subnet and gateway. +type NetAddress struct { + // IPNet of this NetAddress. Note that this is a subnet but it has to contain the + // actual ip of the network interface and not the network address. + IPNet IPNet `json:"ipnet"` + // Gateway for the network. This can be empty if there is no gateway, e.g. internal network. + Gateway net.IP `json:"gateway,omitempty"` +} + +// PerNetworkOptions are options which should be set on a per network basis. +type PerNetworkOptions struct { + // StaticIPs for this container. Optional. + StaticIPs []net.IP `json:"static_ips,omitempty"` + // Aliases contains a list of names which the dns server should resolve + // to this container. Should only be set when DNSEnabled is true on the Network. + // If aliases are set but there is no dns support for this network the + // network interface implementation should ignore this and NOT error. + // Optional. + Aliases []string `json:"aliases,omitempty"` + // StaticMac for this container. Optional. + StaticMAC HardwareAddr `json:"static_mac,omitempty"` + // InterfaceName for this container. Required in the backend. + // Optional in the frontend. Will be filled with ethX (where X is a integer) when empty. + InterfaceName string `json:"interface_name"` +} + +// NetworkOptions for a given container. +type NetworkOptions struct { + // ContainerID is the container id, used for iptables comments and ipam allocation. + ContainerID string `json:"container_id"` + // ContainerName is the container name, used as dns name. + ContainerName string `json:"container_name"` + // PortMappings contains the port mappings for this container + PortMappings []PortMapping `json:"port_mappings,omitempty"` + // Networks contains all networks with the PerNetworkOptions. + // The map should contain at least one element. + Networks map[string]PerNetworkOptions `json:"networks"` +} + +// PortMapping is one or more ports that will be mapped into the container. +type PortMapping struct { + // HostIP is the IP that we will bind to on the host. + // If unset, assumed to be 0.0.0.0 (all interfaces). + HostIP string `json:"host_ip"` + // ContainerPort is the port number that will be exposed from the + // container. + // Mandatory. 
+ ContainerPort uint16 `json:"container_port"` + // HostPort is the port number that will be forwarded from the host into + // the container. + // If omitted, a random port on the host (guaranteed to be over 1024) + // will be assigned. + HostPort uint16 `json:"host_port"` + // Range is the number of ports that will be forwarded, starting at + // HostPort and ContainerPort and counting up. + // This is 1-indexed, so 1 is assumed to be a single port (only the + // Hostport:Containerport mapping will be added), 2 is two ports (both + // Hostport:Containerport and Hostport+1:Containerport+1), etc. + // If unset, assumed to be 1 (a single port). + // Both hostport + range and containerport + range must be less than + // 65536. + Range uint16 `json:"range"` + // Protocol is the protocol forward. + // Must be either "tcp", "udp", and "sctp", or some combination of these + // separated by commas. + // If unset, assumed to be TCP. + Protocol string `json:"protocol"` +} + +// OCICNIPortMapping maps to the standard CNI portmapping Capability. +// Deprecated: Do not use this struct for new fields. This only exists +// for backwards compatibility. +type OCICNIPortMapping struct { + // HostPort is the port number on the host. + HostPort int32 `json:"hostPort"` + // ContainerPort is the port number inside the sandbox. + ContainerPort int32 `json:"containerPort"` + // Protocol is the protocol of the port mapping. + Protocol string `json:"protocol"` + // HostIP is the host ip to use. + HostIP string `json:"hostIP"` +} + +type SetupOptions struct { + NetworkOptions +} + +type TeardownOptions struct { + NetworkOptions +} + +// FilterFunc can be passed to NetworkList to filter the networks. +type FilterFunc func(Network) bool diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go new file mode 100644 index 00000000000..b27ca1f9aa6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -0,0 +1,80 @@ +package util + +import ( + "strings" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/filters" + "github.com/containers/common/pkg/util" + "github.com/pkg/errors" +) + +func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { + filterFuncs := make([]types.FilterFunc, 0, len(f)) + for key, filterValues := range f { + filterFunc, err := createFilterFuncs(key, filterValues) + if err != nil { + return nil, err + } + filterFuncs = append(filterFuncs, filterFunc) + } + return filterFuncs, nil +} + +func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, error) { + switch strings.ToLower(key) { + case "name": + // matches one name, regex allowed + return func(net types.Network) bool { + return util.StringMatchRegexSlice(net.Name, filterValues) + }, nil + + case "driver": + // matches network driver + return func(net types.Network) bool { + return util.StringInSlice(net.Driver, filterValues) + }, nil + + case "id": + // matches part of one id + return func(net types.Network) bool { + return util.StringMatchRegexSlice(net.ID, filterValues) + }, nil + + // TODO: add dns enabled, internal filter + } + return createPruneFilterFuncs(key, filterValues) +} + +func GenerateNetworkPruneFilters(f map[string][]string) ([]types.FilterFunc, error) { + filterFuncs := make([]types.FilterFunc, 0, len(f)) + for key, filterValues := range f { + filterFunc, err := createPruneFilterFuncs(key, filterValues) + if err != nil 
{ + return nil, err + } + filterFuncs = append(filterFuncs, filterFunc) + } + return filterFuncs, nil +} + +func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc, error) { + switch strings.ToLower(key) { + case "label": + // matches all labels + return func(net types.Network) bool { + return filters.MatchLabelFilters(filterValues, net.Labels) + }, nil + + case "until": + until, err := filters.ComputeUntilTimestamp(filterValues) + if err != nil { + return nil, err + } + return func(net types.Network) bool { + return net.Created.Before(until) + }, nil + default: + return nil, errors.Errorf("invalid filter %q", key) + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip.go b/vendor/github.com/containers/common/libnetwork/util/ip.go new file mode 100644 index 00000000000..7c315e31293 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip.go @@ -0,0 +1,56 @@ +package util + +import ( + "net" +) + +// IsIPv6 returns true if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv4 returns true if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// LastIPInSubnet gets the last IP in a subnet +func LastIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + for i := range cidr.IP { + cidr.IP[i] |= ^cidr.Mask[i] + } + return cidr.IP, nil +} + +// FirstIPInSubnet gets the first IP in a subnet +func FirstIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + cidr.IP[len(cidr.IP)-1]++ + return cidr.IP, nil +} + +// NormalizeIP will transform the given ip to the 4 byte len ipv4 if possible +func NormalizeIP(ip *net.IP) { + ipv4 := ip.To4() + if ipv4 != nil { + *ip = ipv4 + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip_calc.go b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go new file mode 100644 index 00000000000..a27ddf78bf7 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go @@ -0,0 +1,53 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
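+
+// The helpers in this file treat IP addresses as big integers, so arithmetic
+// carries across octet boundaries; for example (illustrative), NextIP on
+// 10.88.0.255 yields 10.88.1.0 rather than overflowing the final octet.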
+ +package util + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1 +func NextIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Add(i, big.NewInt(1))) +} + +// PrevIP returns IP decremented by 1 +func PrevIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Sub(i, big.NewInt(1))) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +func Cmp(a, b net.IP) int { + aa := ipToInt(a) + bb := ipToInt(b) + return aa.Cmp(bb) +} + +func ipToInt(ip net.IP) *big.Int { + if v := ip.To4(); v != nil { + return big.NewInt(0).SetBytes(v) + } + return big.NewInt(0).SetBytes(ip.To16()) +} + +func intToIP(i *big.Int) net.IP { + return net.IP(i.Bytes()) +} diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor.go new file mode 100644 index 00000000000..146280df2f7 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor.go @@ -0,0 +1,22 @@ +package apparmor + +import ( + "errors" + + "github.com/containers/common/version" +) + +const ( + // ProfilePrefix is used for version-independent presence checks. + ProfilePrefix = "containers-default-" + + // Profile default name + Profile = ProfilePrefix + version.Version +) + +var ( + // ErrApparmorUnsupported indicates that AppArmor support is not supported. + ErrApparmorUnsupported = errors.New("AppArmor is not supported") + // ErrApparmorRootless indicates that AppArmor support is not supported in rootless mode. + ErrApparmorRootless = errors.New("AppArmor is not supported in rootless mode") +) diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go new file mode 100644 index 00000000000..735d194932f --- /dev/null +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -0,0 +1,301 @@ +// +build linux,apparmor + +package apparmor + +import ( + "bufio" + "bytes" + "io" + "os" + "os/exec" + "path" + "strconv" + "strings" + "text/template" + + "github.com/containers/common/pkg/apparmor/internal/supported" + "github.com/containers/storage/pkg/unshare" + runcaa "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// profileDirectory is the file store for apparmor profiles and macros. +var profileDirectory = "/etc/apparmor.d" + +// IsEnabled returns true if AppArmor is enabled on the host. It also checks +// for the existence of the `apparmor_parser` binary, which will be required to +// apply profiles. +func IsEnabled() bool { + return supported.NewAppArmorVerifier().IsSupported() == nil +} + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. 
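+// The Version field is the apparmor_parser version encoded as
+// major*10^5 + minor*10^3 + patch (see parseAAParserVersion below); for
+// example, parser version 2.8.96 encodes to 208096, the threshold the
+// default template compares against for the signal rules.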
+func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) error {
+	compiled, err := template.New("apparmor_profile").Parse(defaultProfileTemplate)
+	if err != nil {
+		return errors.Wrap(err, "create AppArmor profile from template")
+	}
+
+	if macroExists("tunables/global") {
+		p.Imports = append(p.Imports, "#include <tunables/global>")
+	} else {
+		p.Imports = append(p.Imports, "@{PROC}=/proc/")
+	}
+
+	if macroExists("abstractions/base") {
+		p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
+	}
+
+	ver, err := getAAParserVersion(apparmorParserPath)
+	if err != nil {
+		return errors.Wrap(err, "get AppArmor version")
+	}
+	p.Version = ver
+
+	return errors.Wrap(compiled.Execute(out, p), "execute compiled profile")
+}
+
+// macroExists checks if the passed macro exists.
+func macroExists(m string) bool {
+	_, err := os.Stat(path.Join(profileDirectory, m))
+	return err == nil
+}
+
+// InstallDefault generates a default profile and loads it into the kernel
+// using 'apparmor_parser'.
+func InstallDefault(name string) error {
+	if unshare.IsRootless() {
+		return ErrApparmorRootless
+	}
+
+	p := profileData{
+		Name: name,
+	}
+
+	apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary()
+	if err != nil {
+		return errors.Wrap(err, "find `apparmor_parser` binary")
+	}
+
+	cmd := exec.Command(apparmorParserPath, "-Kr")
+	pipe, err := cmd.StdinPipe()
+	if err != nil {
+		return errors.Wrapf(err, "execute %s", apparmorParserPath)
+	}
+	if err := cmd.Start(); err != nil {
+		if pipeErr := pipe.Close(); pipeErr != nil {
+			logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr)
+		}
+		return errors.Wrapf(err, "start %s command", apparmorParserPath)
+	}
+	if err := p.generateDefault(apparmorParserPath, pipe); err != nil {
+		if pipeErr := pipe.Close(); pipeErr != nil {
+			logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr)
+		}
+		if cmdErr := cmd.Wait(); cmdErr != nil {
+			logrus.Errorf("Unable to wait for AppArmor command: %q", cmdErr)
+		}
+		return errors.Wrap(err, "generate default profile into pipe")
+	}
+
+	if pipeErr := pipe.Close(); pipeErr != nil {
+		logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr)
+	}
+
+	return errors.Wrap(cmd.Wait(), "wait for AppArmor command")
+}
+
+// DefaultContent returns the default profile content as byte slice. The
+// profile is named as the provided `name`. The function errors if the profile
+// generation fails.
+func DefaultContent(name string) ([]byte, error) {
+	p := profileData{Name: name}
+	buffer := &bytes.Buffer{}
+
+	apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary()
+	if err != nil {
+		return nil, errors.Wrap(err, "find `apparmor_parser` binary")
+	}
+
+	if err := p.generateDefault(apparmorParserPath, buffer); err != nil {
+		return nil, errors.Wrap(err, "generate default AppArmor profile")
+	}
+	return buffer.Bytes(), nil
+}
+
+// IsLoaded checks if a profile with the given name has been loaded into the
+// kernel.
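+//
+// A minimal usage sketch (the version suffix in the profile name is
+// illustrative):
+//
+//	if loaded, err := IsLoaded("containers-default-0.40.2"); err == nil && !loaded {
+//		err = InstallDefault("containers-default-0.40.2")
+//	}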
+func IsLoaded(name string) (bool, error) {
+	if name != "" && unshare.IsRootless() {
+		return false, errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
+	}
+
+	file, err := os.Open("/sys/kernel/security/apparmor/profiles")
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, errors.Wrap(err, "open AppArmor profile path")
+	}
+	defer file.Close()
+
+	r := bufio.NewReader(file)
+	for {
+		p, err := r.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return false, errors.Wrap(err, "reading AppArmor profile")
+		}
+		if strings.HasPrefix(p, name+" ") {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// execAAParser runs `apparmor_parser` with the passed arguments.
+func execAAParser(apparmorParserPath, dir string, args ...string) (string, error) {
+	c := exec.Command(apparmorParserPath, args...)
+	c.Dir = dir
+
+	output, err := c.Output()
+	if err != nil {
+		return "", errors.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
+	}
+
+	return string(output), nil
+}
+
+// getAAParserVersion returns the version of apparmor_parser as a single
+// numeric value (see parseAAParserVersion).
+func getAAParserVersion(apparmorParserPath string) (int, error) {
+	output, err := execAAParser(apparmorParserPath, "", "--version")
+	if err != nil {
+		return -1, errors.Wrap(err, "execute apparmor_parser")
+	}
+	return parseAAParserVersion(output)
+}
+
+// parseAAParserVersion parses the given `apparmor_parser --version` output
+// and returns the version encoded as a single integer
+// (major*10^5 + minor*10^3 + patch).
+func parseAAParserVersion(output string) (int, error) {
+	// output is in the form of the following:
+	// AppArmor parser version 2.9.1
+	// Copyright (C) 1999-2008 Novell Inc.
+	// Copyright 2009-2012 Canonical Ltd.
+	lines := strings.SplitN(output, "\n", 2)
+	words := strings.Split(lines[0], " ")
+	version := words[len(words)-1]
+
+	// split by major minor version
+	v := strings.Split(version, ".")
+	if len(v) == 0 || len(v) > 3 {
+		return -1, errors.Errorf("parsing version failed for output: `%s`", output)
+	}
+
+	// Default the versions to 0.
+	var majorVersion, minorVersion, patchLevel int
+
+	majorVersion, err := strconv.Atoi(v[0])
+	if err != nil {
+		return -1, errors.Wrap(err, "convert AppArmor major version")
+	}
+
+	if len(v) > 1 {
+		minorVersion, err = strconv.Atoi(v[1])
+		if err != nil {
+			return -1, errors.Wrap(err, "convert AppArmor minor version")
+		}
+	}
+	if len(v) > 2 {
+		patchLevel, err = strconv.Atoi(v[2])
+		if err != nil {
+			return -1, errors.Wrap(err, "convert AppArmor patch version")
+		}
+	}
+
+	// major*10^5 + minor*10^3 + patch*10^0
+	numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
+	return numericVersion, nil
+}
+
+// CheckProfileAndLoadDefault checks if the specified profile is loaded and
+// loads the default profile if the specified one is prefixed by
+// ProfilePrefix. This allows the latest default AppArmor profile to always
+// be loaded and applied. Note that AppArmor requires root. If it's a
+// default profile, the default profile name is returned, otherwise the
+// specified one.
+func CheckProfileAndLoadDefault(name string) (string, error) {
+	if name == "unconfined" {
+		return name, nil
+	}
+
+	// AppArmor is not supported in rootless mode as it requires root
+	// privileges. Return an error in case a specific profile is specified.
+	if unshare.IsRootless() {
+		if name != "" {
+			return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
+		} else {
+			logrus.Debug("Skipping loading default AppArmor profile (rootless mode)")
+			return "", nil
+		}
+	}
+
+	// Check if AppArmor is disabled and error out if a profile is to be set.
+	if !runcaa.IsEnabled() {
+		if name == "" {
+			return "", nil
+		} else {
+			return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
+		}
+	}
+
+	if name == "" {
+		name = Profile
+	} else if !strings.HasPrefix(name, ProfilePrefix) {
+		// If the specified name is not a default one, ignore it and return the
+		// name.
+		isLoaded, err := IsLoaded(name)
+		if err != nil {
+			return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
+		}
+		if !isLoaded {
+			return "", errors.Errorf("AppArmor profile %q specified but not loaded", name)
+		}
+		return name, nil
+	}
+
+	// To avoid expensive redundant loads on each invocation, check
+	// if it's loaded before installing it.
+	isLoaded, err := IsLoaded(name)
+	if err != nil {
+		return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
+	}
+	if !isLoaded {
+		err = InstallDefault(name)
+		if err != nil {
+			return "", errors.Wrapf(err, "install profile %s", name)
+		}
+		logrus.Infof("Successfully loaded AppArmor profile %q", name)
+	} else {
+		logrus.Infof("AppArmor profile %q is already loaded", name)
+	}
+
+	return name, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
new file mode 100644
index 00000000000..021e32571df
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
@@ -0,0 +1,49 @@
+// +build linux,apparmor
+
+package apparmor
+
+const defaultProfileTemplate = `
+{{range $value := .Imports}}
+{{$value}}
+{{end}}
+
+profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+{{range $value := .InnerImports}}
+  {{$value}}
+{{end}}
+
+  network,
+  capability,
+  file,
+  umount,
+
+{{if ge .Version 208096}}
+  # Allow signals from privileged profiles and from within the same profile
+  signal (receive) peer=unconfined,
+  signal (send,receive) peer={{.Name}},
+{{end}}
+
+  deny @{PROC}/* w,   # deny write for all files directly in /proc (not in a subdir)
+  # deny write to files not in /proc/<number>/** or /proc/sys/**
+  deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
+  deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
+  deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/kcore rwklx,
+
+  deny mount,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+
+{{if ge .Version 208095}}
+  # suppress ptrace denials when using 'ps' inside a container
+  ptrace (trace,read) peer={{.Name}},
+{{end}}
+}
+`
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
new file mode 100644
index 00000000000..13469f1b629
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
@@ -0,0 +1,31 @@
+// +build !linux !apparmor
+
+package apparmor
+
+// IsEnabled dummy.
+func IsEnabled() bool {
+	return false
+}
+
+// InstallDefault dummy.
+func InstallDefault(name string) error {
+	return ErrApparmorUnsupported
+}
+
+// IsLoaded dummy.
+func IsLoaded(name string) (bool, error) {
+	return false, ErrApparmorUnsupported
+}
+
+// CheckProfileAndLoadDefault dummy.
+func CheckProfileAndLoadDefault(name string) (string, error) {
+	if name == "" {
+		return "", nil
+	}
+	return "", ErrApparmorUnsupported
+}
+
+// DefaultContent dummy.
+func DefaultContent(name string) ([]byte, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
new file mode 100644
index 00000000000..778f4e3a20a
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
@@ -0,0 +1,113 @@
+package supported
+
+import (
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/storage/pkg/unshare"
+	runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
+
+// ApparmorVerifier is the global struct for verifying if AppArmor is available
+// on the system.
+type ApparmorVerifier struct {
+	impl             verifierImpl
+	parserBinaryPath string
+}
+
+var (
+	singleton *ApparmorVerifier
+	once      sync.Once
+)
+
+// NewAppArmorVerifier can be used to retrieve a new ApparmorVerifier instance.
+func NewAppArmorVerifier() *ApparmorVerifier {
+	once.Do(func() {
+		singleton = &ApparmorVerifier{impl: &defaultVerifier{}}
+	})
+	return singleton
+}
+
+// IsSupported returns nil if AppArmor is supported by the host system.
+// The method will error if:
+// - the process runs in rootless mode
+// - AppArmor is disabled by the host system
+// - the `apparmor_parser` binary is not discoverable
+func (a *ApparmorVerifier) IsSupported() error {
+	if a.impl.UnshareIsRootless() {
+		return errors.New("AppArmor is not supported on rootless containers")
+	}
+	if !a.impl.RuncIsEnabled() {
+		return errors.New("AppArmor not supported by the host system")
+	}
+
+	_, err := a.FindAppArmorParserBinary()
+	return err
+}
+
+// FindAppArmorParserBinary returns the `apparmor_parser` binary either from
+// `/sbin` or from `$PATH`. It returns an error if the binary could not be
+// found.
+func (a *ApparmorVerifier) FindAppArmorParserBinary() (string, error) {
+	// Use the memoized path if available
+	if a.parserBinaryPath != "" {
+		logrus.Debugf("Using %s binary", a.parserBinaryPath)
+		return a.parserBinaryPath, nil
+	}
+
+	const (
+		binary = "apparmor_parser"
+		sbin   = "/sbin"
+	)
+
+	// `/sbin` is not always in `$PATH`, so we check it explicitly
+	sbinBinaryPath := filepath.Join(sbin, binary)
+	if _, err := a.impl.OsStat(sbinBinaryPath); err == nil {
+		logrus.Debugf("Found %s binary in %s", binary, sbinBinaryPath)
+		a.parserBinaryPath = sbinBinaryPath
+		return sbinBinaryPath, nil
+	}
+
+	// Fallback to checking $PATH
+	if path, err := a.impl.ExecLookPath(binary); err == nil {
+		logrus.Debugf("Found %s binary in %s", binary, path)
+		a.parserBinaryPath = path
+		return path, nil
+	}
+
+	return "", errors.Errorf(
+		"%s binary neither found in %s nor $PATH", binary, sbin,
+	)
+}
+
+//counterfeiter:generate . 
verifierImpl +type verifierImpl interface { + UnshareIsRootless() bool + RuncIsEnabled() bool + OsStat(name string) (os.FileInfo, error) + ExecLookPath(file string) (string, error) +} + +type defaultVerifier struct{} + +func (d *defaultVerifier) UnshareIsRootless() bool { + return unshare.IsRootless() +} + +func (d *defaultVerifier) RuncIsEnabled() bool { + return runcaa.IsEnabled() +} + +func (d *defaultVerifier) OsStat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (d *defaultVerifier) ExecLookPath(file string) (string, error) { + return exec.LookPath(file) +} diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go new file mode 100644 index 00000000000..6765c9e5b8b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -0,0 +1,333 @@ +package auth + +import ( + "bufio" + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + terminal "golang.org/x/term" +) + +// GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default +// --authfile path used in multiple --authfile flag definitions +// Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set +func GetDefaultAuthFile() string { + if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" { + return authfile + } + if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" { + return filepath.Join(auth_env, "config.json") + } + return "" +} + +// CheckAuthFile validates filepath given by --authfile +// used by command has --authfile flag +func CheckAuthFile(authfile string) error { + if authfile == "" { + return nil + } + if _, err := os.Stat(authfile); err != nil { + return errors.Wrap(err, "checking authfile") + } + return nil +} + +// systemContextWithOptions returns a version of sys +// updated with authFile and certDir values (if they are not ""). +// NOTE: this is a shallow copy that can be used and updated, but may share +// data with the original parameter. +func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext { + if sys != nil { + sysCopy := *sys + sys = &sysCopy + } else { + sys = &types.SystemContext{} + } + + if authFile != "" { + sys.AuthFilePath = authFile + } + if certDir != "" { + sys.DockerCertPath = certDir + } + return sys +} + +// Login implements a “log in” command with the provided opts and args +// reading the password from opts.Stdin or the options in opts. 
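+//
+// A minimal caller sketch (registry and credentials are illustrative; a nil
+// *types.SystemContext is permitted and handled by systemContextWithOptions):
+//
+//	opts := &LoginOptions{Username: "user", Password: "secret",
+//		Stdin: os.Stdin, Stdout: os.Stdout}
+//	err := Login(context.Background(), nil, opts, []string{"registry.example.com"})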
+func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error { + systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir) + + var ( + key, registry string + err error + ) + switch len(args) { + case 0: + if !opts.AcceptUnspecifiedRegistry { + return errors.New("please provide a registry to login to") + } + if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil { + return err + } + registry = key + logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key) + + case 1: + key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories) + if err != nil { + return err + } + + default: + return errors.New("login accepts only one registry to login to") + } + + authConfig, err := config.GetCredentials(systemContext, key) + if err != nil { + return errors.Wrap(err, "get credentials") + } + + if opts.GetLoginSet { + if authConfig.Username == "" { + return errors.Errorf("not logged into %s", key) + } + fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username) + return nil + } + if authConfig.IdentityToken != "" { + return errors.New("currently logged in, auth file contains an Identity token") + } + + password := opts.Password + if opts.StdinPassword { + var stdinPasswordStrBuilder strings.Builder + if opts.Password != "" { + return errors.New("Can't specify both --password-stdin and --password") + } + if opts.Username == "" { + return errors.New("Must provide --username with --password-stdin") + } + scanner := bufio.NewScanner(opts.Stdin) + for scanner.Scan() { + fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text()) + } + password = stdinPasswordStrBuilder.String() + } + + // If no username and no password is specified, try to use existing ones. + if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" { + fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key) + if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil { + fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry) + return nil + } + fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password") + } + + username, password, err := getUserAndPass(opts, password, authConfig.Username) + if err != nil { + return errors.Wrap(err, "getting username and password") + } + + if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil { + // Write the new credentials to the authfile + desc, err := config.SetCredentials(systemContext, key, username, password) + if err != nil { + return err + } + if opts.Verbose { + fmt.Fprintln(opts.Stdout, "Used: ", desc) + } + } + if err == nil { + fmt.Fprintln(opts.Stdout, "Login Succeeded!") + return nil + } + if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok { + logrus.Debugf("error logging into %q: %v", key, unauthorized) + return errors.Errorf("error logging into %q: invalid username/password", key) + } + return errors.Wrapf(err, "authenticating creds for %q", key) +} + +// parseCredentialsKey turns the provided argument into a valid credential key +// and computes the registry part. +func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry string, err error) { + // URL arguments are replaced with their host[:port] parts. 
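+	// e.g. (illustrative) "https://registry.example.com:5000/image" is
+	// reduced to "registry.example.com:5000".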
+	key, err = replaceURLByHostPort(arg)
+	if err != nil {
+		return "", "", err
+	}
+
+	split := strings.Split(key, "/")
+	registry = split[0]
+
+	if !acceptRepositories {
+		return registry, registry, nil
+	}
+
+	// Return early if the key isn't namespaced or uses an http{s} prefix.
+	if registry == key {
+		return key, registry, nil
+	}
+
+	// Sanity-check that the key looks reasonable (e.g. doesn't use invalid characters),
+	// and does not contain a tag or digest.
+	// WARNING: ref.Named() MUST NOT be used to compute key, because
+	// reference.ParseNormalizedNamed() turns docker.io/vendor to docker.io/library/vendor
+	// Ideally c/image should provide dedicated validation functionality.
+	ref, err := reference.ParseNormalizedNamed(key)
+	if err != nil {
+		return "", "", errors.Wrapf(err, "parse reference from %q", key)
+	}
+	if !reference.IsNameOnly(ref) {
+		return "", "", errors.Errorf("reference %q contains tag or digest", ref.String())
+	}
+	refRegistry := reference.Domain(ref)
+	if refRegistry != registry { // This should never happen, check just to make sure
+		return "", "", fmt.Errorf("internal error: key %q registry mismatch, %q vs. %q", key, ref, refRegistry)
+	}
+
+	return key, registry, nil
+}
+
+// If the specified string starts with http{s} it is replaced with its
+// host[:port] parts; everything else is stripped. Otherwise, the string is
+// returned as is.
+func replaceURLByHostPort(repository string) (string, error) {
+	if !strings.HasPrefix(repository, "https://") && !strings.HasPrefix(repository, "http://") {
+		return repository, nil
+	}
+	u, err := url.Parse(repository)
+	if err != nil {
+		return "", fmt.Errorf("trimming http{s} prefix: %v", err)
+	}
+	return u.Host, nil
+}
+
+// getUserAndPass gets the username and password from STDIN if not given
+// using the -u and -p flags. If the username prompt is left empty, the
+// displayed userFromAuthFile will be used instead.
+func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) {
+	reader := bufio.NewReader(opts.Stdin)
+	username := opts.Username
+	if username == "" {
+		if userFromAuthFile != "" {
+			fmt.Fprintf(opts.Stdout, "Username (%s): ", userFromAuthFile)
+		} else {
+			fmt.Fprint(opts.Stdout, "Username: ")
+		}
+		username, err = reader.ReadString('\n')
+		if err != nil {
+			return "", "", errors.Wrap(err, "reading username")
+		}
+		// If the user just hit enter, use the displayed user from
+		// the authentication file. This allows a lazy
+		// `$ buildah login -p $NEW_PASSWORD` without specifying the
+		// user.
+ if strings.TrimSpace(username) == "" { + username = userFromAuthFile + } + } + if password == "" { + fmt.Fprint(opts.Stdout, "Password: ") + pass, err := terminal.ReadPassword(int(os.Stdin.Fd())) + if err != nil { + return "", "", errors.Wrap(err, "reading password") + } + password = string(pass) + fmt.Fprintln(opts.Stdout) + } + return strings.TrimSpace(username), password, err +} + +// Logout implements a “log out” command with the provided opts and args +func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error { + if err := CheckAuthFile(opts.AuthFile); err != nil { + return err + } + systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "") + + if opts.All { + if len(args) != 0 { + return errors.New("--all takes no arguments") + } + if err := config.RemoveAllAuthentication(systemContext); err != nil { + return err + } + fmt.Fprintln(opts.Stdout, "Removed login credentials for all registries") + return nil + } + + var ( + key, registry string + err error + ) + switch len(args) { + case 0: + if !opts.AcceptUnspecifiedRegistry { + return errors.New("please provide a registry to logout from") + } + if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil { + return err + } + registry = key + logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key) + + case 1: + key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories) + if err != nil { + return err + } + + default: + return errors.New("logout accepts only one registry to logout from") + } + + err = config.RemoveAuthentication(systemContext, key) + switch errors.Cause(err) { + case nil: + fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key) + return nil + case config.ErrNotLoggedIn: + authConfig, err := config.GetCredentials(systemContext, key) + if err != nil { + return errors.Wrap(err, "get credentials") + } + + authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry) + if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil { + fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. 
Please use docker logout instead.\n", key)
+			return nil
+		}
+		return errors.Errorf("Not logged into %s\n", key)
+	default:
+		return errors.Wrapf(err, "logging out of %q", key)
+	}
+}
+
+// defaultRegistryWhenUnspecified returns the first registry from the
+// unqualified-search list in registries.conf; used by login/logout when no
+// registry argument is specified
+func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
+	registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
+	if err != nil {
+		return "", errors.Wrap(err, "getting registry from registry.conf, please specify a registry")
+	}
+	if len(registriesFromFile) == 0 {
+		return "", errors.New("no registries found in registries.conf, a registry must be provided")
+	}
+	return registriesFromFile[0], nil
+}
diff --git a/vendor/github.com/containers/common/pkg/auth/cli.go b/vendor/github.com/containers/common/pkg/auth/cli.go
new file mode 100644
index 00000000000..7266bf48bd9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/auth/cli.go
@@ -0,0 +1,80 @@
+package auth
+
+import (
+	"io"
+
+	"github.com/containers/common/pkg/completion"
+	"github.com/spf13/pflag"
+)
+
+// LoginOptions represents common flags in login.
+// In addition, the caller should probably provide a --tls-verify flag (that affects the provided
+// *types.SystemContext)
+type LoginOptions struct {
+	// CLI flags managed by the FlagSet returned by GetLoginFlags
+	// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
+	// other CLI frameworks should set them based on user input.
+	AuthFile           string
+	CertDir            string
+	Password           string
+	Username           string
+	StdinPassword      bool
+	GetLoginSet        bool
+	Verbose            bool // set to true for verbose output
+	AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
+	// Options caller can set
+	Stdin                     io.Reader // set to os.Stdin
+	Stdout                    io.Writer // set to os.Stdout
+	AcceptUnspecifiedRegistry bool      // set to true to allow login with an unspecified registry
+}
+
+// LogoutOptions represents the results for flags in logout
+type LogoutOptions struct {
+	// CLI flags managed by the FlagSet returned by GetLogoutFlags
+	// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
+	// other CLI frameworks should set them based on user input.
+	AuthFile           string
+	All                bool
+	AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
+	// Options caller can set
+	Stdout                    io.Writer // set to os.Stdout
+	AcceptUnspecifiedRegistry bool      // set to true to allow logout with an unspecified registry
+}
+
+// GetLoginFlags defines and returns login flags for containers tools
+func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
+	fs := pflag.FlagSet{}
+	fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file.
Use REGISTRY_AUTH_FILE environment variable to override") + fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") + fs.StringVarP(&flags.Password, "password", "p", "", "Password for registry") + fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry") + fs.BoolVar(&flags.StdinPassword, "password-stdin", false, "Take the password from stdin") + fs.BoolVar(&flags.GetLoginSet, "get-login", false, "Return the current login user for the registry") + fs.BoolVarP(&flags.Verbose, "verbose", "v", false, "Write more detailed information to stdout") + return &fs +} + +// GetLoginFlagsCompletions returns the FlagCompletions for the login flags +func GetLoginFlagsCompletions() completion.FlagCompletions { + flagCompletion := completion.FlagCompletions{} + flagCompletion["authfile"] = completion.AutocompleteDefault + flagCompletion["cert-dir"] = completion.AutocompleteDefault + flagCompletion["password"] = completion.AutocompleteNone + flagCompletion["username"] = completion.AutocompleteNone + return flagCompletion +} + +// GetLogoutFlags defines and returns logout flags for containers tools +func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet { + fs := pflag.FlagSet{} + fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + fs.BoolVarP(&flags.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file") + return &fs +} + +// GetLogoutFlagsCompletions returns the FlagCompletions for the logout flags +func GetLogoutFlagsCompletions() completion.FlagCompletions { + flagCompletion := completion.FlagCompletions{} + flagCompletion["authfile"] = completion.AutocompleteDefault + return flagCompletion +} diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go new file mode 100644 index 00000000000..10c5dd7c4c6 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -0,0 +1,209 @@ +package capabilities + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has been copied from github.com/docker/docker but been +// changed significantly to fit the needs of libpod. + +import ( + "sort" + "strings" + "sync" + + "github.com/pkg/errors" + "github.com/syndtr/gocapability/capability" +) + +var ( + // Used internally and populated during init(). + capabilityList []string + + // Used internally and populated during init(). + capsList []capability.Cap + + // ErrUnknownCapability is thrown when an unknown capability is processed. + ErrUnknownCapability = errors.New("unknown capability") + + // ContainerImageLabels - label can indicate the required + // capabilities required by containers to run the container image. + ContainerImageLabels = []string{"io.containers.capabilities"} +) + +// All is a special value used to add/drop all known capabilities. +// Useful on the CLI for `--cap-add=all` etc. 
+const All = "ALL" + +func getCapName(c capability.Cap) string { + return "CAP_" + strings.ToUpper(c.String()) +} + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capsList = append(capsList, cap) + capabilityList = append(capabilityList, getCapName(cap)) + sort.Strings(capabilityList) + } +} + +// stringInSlice determines if a string is in a string slice, returns bool +func stringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +var ( + boundingSetOnce sync.Once + boundingSetRet []string + boundingSetErr error +) + +// BoundingSet returns the capabilities in the current bounding set +func BoundingSet() ([]string, error) { + boundingSetOnce.Do(func() { + currentCaps, err := capability.NewPid2(0) + if err != nil { + boundingSetErr = err + return + } + err = currentCaps.Load() + if err != nil { + boundingSetErr = err + return + } + var r []string + for _, c := range capsList { + if !currentCaps.Get(capability.BOUNDING, c) { + continue + } + r = append(r, getCapName(c)) + } + boundingSetRet = r + sort.Strings(boundingSetRet) + boundingSetErr = err + }) + return boundingSetRet, boundingSetErr +} + +// AllCapabilities returns all known capabilities. +func AllCapabilities() []string { + return capabilityList +} + +// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet +// present). +func NormalizeCapabilities(caps []string) ([]string, error) { + normalized := make([]string, len(caps)) + for i, c := range caps { + c = strings.ToUpper(c) + if c == All { + normalized = append(normalized, c) + continue + } + if !strings.HasPrefix(c, "CAP_") { + c = "CAP_" + c + } + if !stringInSlice(c, capabilityList) { + return nil, errors.Wrapf(ErrUnknownCapability, "%q", c) + } + normalized[i] = c + } + sort.Strings(normalized) + return normalized, nil +} + +// ValidateCapabilities validates if caps only contains valid capabilities. +func ValidateCapabilities(caps []string) error { + for _, c := range caps { + if !stringInSlice(c, capabilityList) { + return errors.Wrapf(ErrUnknownCapability, "%q", c) + } + } + return nil +} + +// MergeCapabilities computes a set of capabilities by adding capabilities +// to or dropping them from base. 
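+//
+// For example (illustrative), merging base {CAP_CHOWN, CAP_KILL} with
+// adds {NET_ADMIN} and drops {KILL} yields {CAP_CHOWN, CAP_NET_ADMIN};
+// names are normalized to their CAP_-prefixed upper-case form first.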
+//
+// Note that:
+// "ALL" in capAdd adds all capabilities from the current bounding set
+// "ALL" in capDrop returns only the capabilities specified in capAdd
+func MergeCapabilities(base, adds, drops []string) ([]string, error) {
+	var caps []string
+
+	// Normalize the base capabilities
+	base, err := NormalizeCapabilities(base)
+	if err != nil {
+		return nil, err
+	}
+	if len(adds) == 0 && len(drops) == 0 {
+		// Nothing to tweak; we're done
+		return base, nil
+	}
+	capDrop, err := NormalizeCapabilities(drops)
+	if err != nil {
+		return nil, err
+	}
+	capAdd, err := NormalizeCapabilities(adds)
+	if err != nil {
+		return nil, err
+	}
+
+	if stringInSlice(All, capDrop) {
+		if stringInSlice(All, capAdd) {
+			return nil, errors.New("adding all caps and removing all caps not allowed")
+		}
+		// "Drop" all capabilities; return what's in capAdd instead
+		sort.Strings(capAdd)
+		return capAdd, nil
+	}
+
+	if stringInSlice(All, capAdd) {
+		base, err = BoundingSet()
+		if err != nil {
+			return nil, err
+		}
+		capAdd = []string{}
+	} else {
+		for _, add := range capAdd {
+			if stringInSlice(add, capDrop) {
+				return nil, errors.Errorf("capability %q cannot be dropped and added", add)
+			}
+		}
+	}
+
+	for _, drop := range capDrop {
+		if stringInSlice(drop, capAdd) {
+			return nil, errors.Errorf("capability %q cannot be dropped and added", drop)
+		}
+	}
+
+	// Drop any capabilities in capDrop that are in base
+	for _, cap := range base {
+		if stringInSlice(cap, capDrop) {
+			continue
+		}
+		caps = append(caps, cap)
+	}
+
+	// Add any capabilities in capAdd that are not in base
+	for _, cap := range capAdd {
+		if stringInSlice(cap, base) {
+			continue
+		}
+		caps = append(caps, cap)
+	}
+	sort.Strings(caps)
+	return caps, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go
new file mode 100644
index 00000000000..bacd4eb9362
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/blkio.go
@@ -0,0 +1,149 @@
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	spec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+type blkioHandler struct {
+}
+
+func getBlkioHandler() *blkioHandler {
+	return &blkioHandler{}
+}
+
+// Apply sets the specified constraints
+func (c *blkioHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error {
+	if res.BlockIO == nil {
+		return nil
+	}
+	return fmt.Errorf("blkio apply function not implemented yet")
+}
+
+// Create the cgroup
+func (c *blkioHandler) Create(ctr *CgroupControl) (bool, error) {
+	if ctr.cgroup2 {
+		return false, nil
+	}
+	return ctr.createCgroupDirectory(Blkio)
+}
+
+// Destroy the cgroup
+func (c *blkioHandler) Destroy(ctr *CgroupControl) error {
+	return rmDirRecursively(ctr.getCgroupv1Path(Blkio))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error {
+	var ioServiceBytesRecursive []BlkIOEntry
+
+	if ctr.cgroup2 {
+		// more details on the io.stat file format: https://facebookmicrosites.github.io/cgroup2/docs/io-controller.html
+		values, err := readCgroup2MapFile(ctr, "io.stat")
+		if err != nil {
+			return err
+		}
+		for k, v := range values {
+			d := strings.Split(k, ":")
+			if len(d) != 2 {
+				continue
+			}
+			minor, err := strconv.ParseUint(d[0], 10, 0)
+			if err != nil {
+				return err
+			}
+			major, err := strconv.ParseUint(d[1], 10, 0)
+			if err != nil {
+				return err
+			}
+
+			for _, item := range v {
+				d := 
strings.Split(item, "=") + if len(d) != 2 { + continue + } + op := d[0] + + // Accommodate the cgroup v1 naming + switch op { + case "rbytes": + op = "read" + case "wbytes": + op = "write" + } + + value, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + entry := BlkIOEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + } + } else { + BlkioRoot := ctr.getCgroupv1Path(Blkio) + + p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive") + f, err := os.Open(p) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "open %s", p) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 3 { + continue + } + d := strings.Split(parts[0], ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + op := parts[1] + + value, err := strconv.ParseUint(parts[2], 10, 0) + if err != nil { + return err + } + entry := BlkIOEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + if err := scanner.Err(); err != nil { + return errors.Wrapf(err, "parse %s", p) + } + } + m.Blkio = BlkioMetrics{IoServiceBytesRecursive: ioServiceBytesRecursive} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go new file mode 100644 index 00000000000..0bf275f3851 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go @@ -0,0 +1,671 @@ +package cgroups + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/storage/pkg/unshare" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // ErrCgroupDeleted means the cgroup was deleted + ErrCgroupDeleted = errors.New("cgroup deleted") + // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment + ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") + ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") +) + +// CgroupControl controls a cgroup hierarchy +type CgroupControl struct { + cgroup2 bool + path string + systemd bool + // List of additional cgroup subsystems joined that + // do not have a custom handler. 
+ additionalControllers []controller +} + +// CPUUsage keeps stats for the CPU usage (unit: nanoseconds) +type CPUUsage struct { + Kernel uint64 + Total uint64 + PerCPU []uint64 +} + +// MemoryUsage keeps stats for the memory usage +type MemoryUsage struct { + Usage uint64 + Limit uint64 +} + +// CPUMetrics keeps stats for the CPU usage +type CPUMetrics struct { + Usage CPUUsage +} + +// BlkIOEntry describes an entry in the blkio stats +type BlkIOEntry struct { + Op string + Major uint64 + Minor uint64 + Value uint64 +} + +// BlkioMetrics keeps usage stats for the blkio cgroup controller +type BlkioMetrics struct { + IoServiceBytesRecursive []BlkIOEntry +} + +// MemoryMetrics keeps usage stats for the memory cgroup controller +type MemoryMetrics struct { + Usage MemoryUsage +} + +// PidsMetrics keeps usage stats for the pids cgroup controller +type PidsMetrics struct { + Current uint64 +} + +// Metrics keeps usage stats for the cgroup controllers +type Metrics struct { + CPU CPUMetrics + Blkio BlkioMetrics + Memory MemoryMetrics + Pids PidsMetrics +} + +type controller struct { + name string + symlink bool +} + +type controllerHandler interface { + Create(*CgroupControl) (bool, error) + Apply(*CgroupControl, *spec.LinuxResources) error + Destroy(*CgroupControl) error + Stat(*CgroupControl, *Metrics) error +} + +const ( + cgroupRoot = "/sys/fs/cgroup" + // CPU is the cpu controller + CPU = "cpu" + // CPUAcct is the cpuacct controller + CPUAcct = "cpuacct" + // CPUset is the cpuset controller + CPUset = "cpuset" + // Memory is the memory controller + Memory = "memory" + // Pids is the pids controller + Pids = "pids" + // Blkio is the blkio controller + Blkio = "blkio" +) + +var handlers map[string]controllerHandler + +func init() { + handlers = make(map[string]controllerHandler) + handlers[CPU] = getCPUHandler() + handlers[CPUset] = getCpusetHandler() + handlers[Memory] = getMemoryHandler() + handlers[Pids] = getPidsHandler() + handlers[Blkio] = getBlkioHandler() +} + +// getAvailableControllers get the available controllers +func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { + if cgroup2 { + controllers := []controller{} + controllersFile := cgroupRoot + "/cgroup.controllers" + // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit + if unshare.IsRootless() { + userSlice, err := getCgroupPathForCurrentProcess() + if err != nil { + return controllers, err + } + // userSlice already contains '/' so not adding here + basePath := cgroupRoot + userSlice + controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) + } + controllersFileBytes, err := ioutil.ReadFile(controllersFile) + if err != nil { + return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile) + } + for _, controllerName := range strings.Fields(string(controllersFileBytes)) { + c := controller{ + name: controllerName, + symlink: false, + } + controllers = append(controllers, c) + } + return controllers, nil + } + + subsystems, _ := cgroupV1GetAllSubsystems() + controllers := []controller{} + // cgroupv1 and rootless: No subsystem is available: delegation is unsafe. 
+ if unshare.IsRootless() { + return controllers, nil + } + + for _, name := range subsystems { + if _, found := exclude[name]; found { + continue + } + fileInfo, err := os.Stat(cgroupRoot + "/" + name) + if err != nil { + continue + } + c := controller{ + name: name, + symlink: !fileInfo.IsDir(), + } + controllers = append(controllers, c) + } + + return controllers, nil +} + +// GetAvailableControllers get string:bool map of all the available controllers +func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { + availableControllers, err := getAvailableControllers(exclude, cgroup2) + if err != nil { + return nil, err + } + controllerList := []string{} + for _, controller := range availableControllers { + controllerList = append(controllerList, controller.name) + } + + return controllerList, nil +} + +func cgroupV1GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + if err := s.Err(); err != nil { + return nil, err + } + return subsystems, nil +} + +func getCgroupPathForCurrentProcess() (string, error) { + path := fmt.Sprintf("/proc/%d/cgroup", os.Getpid()) + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + + cgroupPath := "" + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + procEntries := strings.SplitN(text, "::", 2) + // set process cgroupPath only if entry is valid + if len(procEntries) > 1 { + cgroupPath = procEntries[1] + } + } + if err := s.Err(); err != nil { + return cgroupPath, err + } + return cgroupPath, nil +} + +// getCgroupv1Path is a helper function to get the cgroup v1 path +func (c *CgroupControl) getCgroupv1Path(name string) string { + return filepath.Join(cgroupRoot, name, c.path) +} + +// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers +func createCgroupv2Path(path string) (deferredError error) { + if !strings.HasPrefix(path, cgroupRoot+"/") { + return fmt.Errorf("invalid cgroup path %s", path) + } + content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers") + if err != nil { + return err + } + ctrs := bytes.Fields(content) + res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...) + + current := "/sys/fs" + elements := strings.Split(path, "/") + for i, e := range elements[3:] { + current = filepath.Join(current, e) + if i > 0 { + if err := os.Mkdir(current, 0755); err != nil { + if !os.IsExist(err) { + return err + } + } else { + // If the directory was created, be sure it is not left around on errors. + defer func() { + if deferredError != nil { + os.Remove(current) + } + }() + } + } + // We enable the controllers for all the path components except the last one. It is not allowed to add + // PIDs if there are already enabled controllers. 
+ if i < len(elements[3:])-1 { + if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0755); err != nil { + return err + } + } + } + return nil +} + +// initialize initializes the specified hierarchy +func (c *CgroupControl) initialize() (err error) { + createdSoFar := map[string]controllerHandler{} + defer func() { + if err != nil { + for name, ctr := range createdSoFar { + if err := ctr.Destroy(c); err != nil { + logrus.Warningf("error cleaning up controller %s for %s", name, c.path) + } + } + } + }() + if c.cgroup2 { + if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { + return errors.Wrapf(err, "error creating cgroup path %s", c.path) + } + } + for name, handler := range handlers { + created, err := handler.Create(c) + if err != nil { + return err + } + if created { + createdSoFar[name] = handler + } + } + + if !c.cgroup2 { + // We won't need to do this for cgroup v2 + for _, ctr := range c.additionalControllers { + if ctr.symlink { + continue + } + path := c.getCgroupv1Path(ctr.name) + if err := os.MkdirAll(path, 0755); err != nil { + return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name) + } + } + } + + return nil +} + +func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { + cPath := c.getCgroupv1Path(controller) + _, err := os.Stat(cPath) + if err == nil { + return false, nil + } + + if !os.IsNotExist(err) { + return false, err + } + + if err := os.MkdirAll(cPath, 0755); err != nil { + return false, errors.Wrapf(err, "error creating cgroup for %s", controller) + } + return true, nil +} + +func readFileAsUint64(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + v := cleanString(string(data)) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil +} + +func readFileByKeyAsUint64(path, key string) (uint64, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + for _, line := range strings.Split(string(content), "\n") { + fields := strings.SplitN(line, " ", 2) + if fields[0] == key { + v := cleanString(string(fields[1])) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil + } + } + + return 0, fmt.Errorf("no key named %s from %s", key, path) +} + +// New creates a new cgroup control +func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + } + + if !cgroup2 { + controllers, err := getAvailableControllers(handlers, false) + if err != nil { + return nil, err + } + control.additionalControllers = controllers + } + + if err := control.initialize(); err != nil { + return nil, err + } + + return control, nil +} + +// NewSystemd creates a new cgroup control +func NewSystemd(path string) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + systemd: true, + } + return control, nil +} + +// Load loads an existing cgroup control +func Load(path string) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, 
err
+	}
+	control := &CgroupControl{
+		cgroup2: cgroup2,
+		path:    path,
+		systemd: false,
+	}
+	if !cgroup2 {
+		controllers, err := getAvailableControllers(handlers, false)
+		if err != nil {
+			return nil, err
+		}
+		control.additionalControllers = controllers
+	}
+	if !cgroup2 {
+		oneExists := false
+		// check that the cgroup exists at least under one controller
+		for name := range handlers {
+			p := control.getCgroupv1Path(name)
+			if _, err := os.Stat(p); err == nil {
+				oneExists = true
+				break
+			}
+		}
+
+		// if there is no controller at all, raise an error
+		if !oneExists {
+			if unshare.IsRootless() {
+				return nil, ErrCgroupV1Rootless
+			}
+			// compatible with the error code
+			// used by containerd/cgroups
+			return nil, ErrCgroupDeleted
+		}
+	}
+	return control, nil
+}
+
+// CreateSystemdUnit creates the systemd cgroup
+func (c *CgroupControl) CreateSystemdUnit(path string) error {
+	if !c.systemd {
+		return fmt.Errorf("the cgroup controller is not using systemd")
+	}
+
+	conn, err := systemdDbus.NewWithContext(context.TODO())
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	return systemdCreate(path, conn)
+}
+
+// GetUserConnection returns a user connection to D-BUS
+func GetUserConnection(uid int) (*systemdDbus.Conn, error) {
+	return systemdDbus.NewConnection(func() (*dbus.Conn, error) {
+		return dbusAuthConnection(uid, dbus.SessionBusPrivate)
+	})
+}
+
+// CreateSystemdUserUnit creates the systemd cgroup for the specified user
+func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error {
+	if !c.systemd {
+		return fmt.Errorf("the cgroup controller is not using systemd")
+	}
+
+	conn, err := GetUserConnection(uid)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	return systemdCreate(path, conn)
+}
+
+func dbusAuthConnection(uid int, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
+	conn, err := createBus()
+	if err != nil {
+		return nil, err
+	}
+
+	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))}
+
+	err = conn.Auth(methods)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+	if err := conn.Hello(); err != nil {
+		// also close the authenticated connection if the initial Hello fails,
+		// so the bus connection is not leaked on this error path
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// Delete cleans a cgroup
+func (c *CgroupControl) Delete() error {
+	return c.DeleteByPath(c.path)
+}
+
+// DeleteByPathConn deletes the specified cgroup path using the specified
+// dbus connection if needed.
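+//
+// A minimal usage sketch (the "my.slice/sub" path is illustrative only):
+//
+//	ctrl, err := cgroups.Load("my.slice/sub")
+//	if err == nil {
+//		err = ctrl.Delete() // Delete wraps DeleteByPath, which calls DeleteByPathConn
+//	}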
+func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error { + if c.systemd { + return systemdDestroyConn(path, conn) + } + if c.cgroup2 { + return rmDirRecursively(filepath.Join(cgroupRoot, c.path)) + } + var lastError error + for _, h := range handlers { + if err := h.Destroy(c); err != nil { + lastError = err + } + } + + for _, ctr := range c.additionalControllers { + if ctr.symlink { + continue + } + p := c.getCgroupv1Path(ctr.name) + if err := rmDirRecursively(p); err != nil { + lastError = errors.Wrapf(err, "remove %s", p) + } + } + return lastError +} + +// DeleteByPath deletes the specified cgroup path +func (c *CgroupControl) DeleteByPath(path string) error { + if c.systemd { + conn, err := systemdDbus.NewWithContext(context.TODO()) + if err != nil { + return err + } + defer conn.Close() + return c.DeleteByPathConn(path, conn) + } + return c.DeleteByPathConn(path, nil) +} + +// Update updates the cgroups +func (c *CgroupControl) Update(resources *spec.LinuxResources) error { + for _, h := range handlers { + if err := h.Apply(c, resources); err != nil { + return err + } + } + return nil +} + +// AddPid moves the specified pid to the cgroup +func (c *CgroupControl) AddPid(pid int) error { + pidString := []byte(fmt.Sprintf("%d\n", pid)) + + if c.cgroup2 { + p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") + if err := ioutil.WriteFile(p, pidString, 0644); err != nil { + return errors.Wrapf(err, "write %s", p) + } + return nil + } + + names := make([]string, 0, len(handlers)) + for n := range handlers { + names = append(names, n) + } + + for _, c := range c.additionalControllers { + if !c.symlink { + names = append(names, c.name) + } + } + + for _, n := range names { + // If we aren't using cgroup2, we won't write correctly to unified hierarchy + if !c.cgroup2 && n == "unified" { + continue + } + p := filepath.Join(c.getCgroupv1Path(n), "tasks") + if err := ioutil.WriteFile(p, pidString, 0644); err != nil { + return errors.Wrapf(err, "write %s", p) + } + } + return nil +} + +// Stat returns usage statistics for the cgroup +func (c *CgroupControl) Stat() (*Metrics, error) { + m := Metrics{} + found := false + for _, h := range handlers { + if err := h.Stat(c, &m); err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + logrus.Warningf("Failed to retrieve cgroup stats: %v", err) + continue + } + found = true + } + if !found { + return nil, ErrStatCgroup + } + return &m, nil +} + +func readCgroup2MapPath(path string) (map[string][]string, error) { + ret := map[string][]string{} + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return ret, nil + } + return nil, errors.Wrapf(err, "open file %s", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + ret[parts[0]] = parts[1:] + } + if err := scanner.Err(); err != nil { + return nil, errors.Wrapf(err, "parsing file %s", path) + } + return ret, nil +} + +func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { + p := filepath.Join(cgroupRoot, ctr.path, name) + + return readCgroup2MapPath(p) +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go new file mode 100644 index 00000000000..c1fe194b23a --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go @@ -0,0 +1,129 @@ +// 
+build linux
+
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	isUnifiedOnce sync.Once
+	isUnified     bool
+	isUnifiedErr  error
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+	isUnifiedOnce.Do(func() {
+		var st syscall.Statfs_t
+		if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
+			isUnified, isUnifiedErr = false, err
+		} else {
+			isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil
+		}
+	})
+	return isUnified, isUnifiedErr
+}
+
+// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the
+// current cgroup.
+func UserOwnsCurrentSystemdCgroup() (bool, error) {
+	uid := os.Geteuid()
+
+	cgroup2, err := IsCgroup2UnifiedMode()
+	if err != nil {
+		return false, err
+	}
+
+	f, err := os.Open("/proc/self/cgroup")
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.SplitN(line, ":", 3)
+
+		if len(parts) < 3 {
+			continue
+		}
+
+		var cgroupPath string
+
+		if cgroup2 {
+			cgroupPath = filepath.Join(cgroupRoot, parts[2])
+		} else {
+			if parts[1] != "name=systemd" {
+				continue
+			}
+			cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2])
+		}
+
+		st, err := os.Stat(cgroupPath)
+		if err != nil {
+			return false, err
+		}
+		s := st.Sys()
+		if s == nil {
+			return false, fmt.Errorf("error stat cgroup path %s", cgroupPath)
+		}
+
+		if int(s.(*syscall.Stat_t).Uid) != uid {
+			return false, nil
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return false, errors.Wrapf(err, "parsing file /proc/self/cgroup")
+	}
+	return true, nil
+}
+
+// rmDirRecursively deletes a cgroup directory recursively.
+// It differs from os.RemoveAll as it doesn't attempt to unlink files.
+// On cgroupfs we are allowed only to rmdir empty directories.
+func rmDirRecursively(path string) error {
+	if err := os.Remove(path); err == nil || os.IsNotExist(err) {
+		return nil
+	}
+	entries, err := ioutil.ReadDir(path)
+	if err != nil {
+		return err
+	}
+	for _, i := range entries {
+		if i.IsDir() {
+			if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil {
+				return err
+			}
+		}
+	}
+
+	attempts := 0
+	for {
+		err := os.Remove(path)
+		if err == nil || os.IsNotExist(err) {
+			return nil
+		}
+		if errors.Is(err, unix.EBUSY) {
+			// attempt up to 5 seconds if the cgroup is busy
+			if attempts < 500 {
+				time.Sleep(time.Millisecond * 10)
+				attempts++
+				continue
+			}
+		}
+		return errors.Wrapf(err, "remove %s", path)
+	}
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
new file mode 100644
index 00000000000..95d42417021
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
@@ -0,0 +1,22 @@
+// +build !linux
+
+package cgroups
+
+import (
+	"os"
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+	return false, nil
+}
+
+// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the
+// current cgroup.
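+// On non-Linux builds there is no cgroup hierarchy to inspect, so this stub
+// conservatively reports that the current cgroup is not owned by the user.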
+func UserOwnsCurrentSystemdCgroup() (bool, error) {
+	return false, nil
+}
+
+func rmDirRecursively(path string) error {
+	return os.RemoveAll(path)
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go
new file mode 100644
index 00000000000..23539757d23
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpu.go
@@ -0,0 +1,160 @@
+package cgroups
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	spec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+type cpuHandler struct {
+}
+
+func getCPUHandler() *cpuHandler {
+	return &cpuHandler{}
+}
+
+func cleanString(s string) string {
+	return strings.Trim(s, "\n")
+}
+
+func readAcct(ctr *CgroupControl, name string) (uint64, error) {
+	p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
+	return readFileAsUint64(p)
+}
+
+func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) {
+	p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
+	data, err := ioutil.ReadFile(p)
+	if err != nil {
+		return nil, errors.Wrapf(err, "reading %s", p)
+	}
+	r := []uint64{}
+	for _, s := range strings.Split(string(data), " ") {
+		s = cleanString(s)
+		if s == "" {
+			break
+		}
+		v, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, errors.Wrapf(err, "parsing %s", s)
+		}
+		r = append(r, v)
+	}
+	return r, nil
+}
+
+// Apply set the specified constraints
+func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error {
+	if res.CPU == nil {
+		return nil
+	}
+	return fmt.Errorf("cpu apply not implemented yet")
+}
+
+// Create the cgroup
+func (c *cpuHandler) Create(ctr *CgroupControl) (bool, error) {
+	if ctr.cgroup2 {
+		return false, nil
+	}
+	return ctr.createCgroupDirectory(CPU)
+}
+
+// Destroy the cgroup
+func (c *cpuHandler) Destroy(ctr *CgroupControl) error {
+	return rmDirRecursively(ctr.getCgroupv1Path(CPU))
+}
+
+// Stat fills a metrics structure with usage stats for the controller
+func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error {
+	var err error
+	usage := CPUUsage{}
+	if ctr.cgroup2 {
+		values, err := readCgroup2MapFile(ctr, "cpu.stat")
+		if err != nil {
+			return err
+		}
+		if val, found := values["usage_usec"]; found {
+			usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 64)
+			if err != nil {
+				return err
+			}
+			// cpu.stat reports microseconds; convert to nanoseconds.
+			usage.Total *= 1000
+		}
+		if val, found := values["system_usec"]; found {
+			usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 64)
+			if err != nil {
+				return err
+			}
+			usage.Kernel *= 1000
+		}
+		// FIXME: How to read usage.PerCPU?
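+		// cpu.stat on cgroup v2 only exposes aggregate counters
+		// (usage_usec, user_usec, system_usec); there is no per-CPU
+		// breakdown equivalent to v1's cpuacct.usage_percpu, so PerCPU
+		// is left empty here.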
+ } else { + usage.Total, err = readAcct(ctr, "cpuacct.usage") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.Total = 0 + } + usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.Kernel = 0 + } + usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.PerCPU = nil + } + } + m.CPU = CPUMetrics{Usage: usage} + return nil +} + +// GetSystemCPUUsage returns the system usage for all the cgroups +func GetSystemCPUUsage() (uint64, error) { + cgroupv2, err := IsCgroup2UnifiedMode() + if err != nil { + return 0, err + } + if !cgroupv2 { + p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage") + return readFileAsUint64(p) + } + + files, err := ioutil.ReadDir(cgroupRoot) + if err != nil { + return 0, err + } + var total uint64 + for _, file := range files { + if !file.IsDir() { + continue + } + p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") + + values, err := readCgroup2MapPath(p) + if err != nil { + return 0, err + } + + if val, found := values["usage_usec"]; found { + v, err := strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return 0, err + } + total += v * 1000 + } + } + return total, nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go new file mode 100644 index 00000000000..22ac0a07968 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go @@ -0,0 +1,85 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type cpusetHandler struct { +} + +func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { + if dir == cgroupRoot { + return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) + } + path := filepath.Join(dir, file) + parentPath := path + if cgroupv2 { + parentPath = fmt.Sprintf("%s.effective", parentPath) + } + data, err := ioutil.ReadFile(parentPath) + if err != nil { + return nil, errors.Wrapf(err, "open %s", path) + } + if strings.Trim(string(data), "\n") != "" { + return data, nil + } + data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, data, 0644); err != nil { + return nil, errors.Wrapf(err, "write %s", path) + } + return data, nil +} + +func cpusetCopyFromParent(path string, cgroupv2 bool) error { + for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { + if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { + return err + } + } + return nil +} + +func getCpusetHandler() *cpusetHandler { + return &cpusetHandler{} +} + +// Apply set the specified constraints +func (c *cpusetHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.CPU == nil { + return nil + } + return fmt.Errorf("cpuset apply not implemented yet") +} + +// Create the cgroup +func (c *cpusetHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + path := filepath.Join(cgroupRoot, ctr.path) + return true, cpusetCopyFromParent(path, true) + } + + created, err := ctr.createCgroupDirectory(CPUset) + if !created || err != nil { + return created, err + } + return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false) +} + +// Destroy the 
cgroup +func (c *cpusetHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(CPUset)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *cpusetHandler) Stat(ctr *CgroupControl, m *Metrics) error { + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go new file mode 100644 index 00000000000..10d65893c6c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/memory.go @@ -0,0 +1,66 @@ +package cgroups + +import ( + "fmt" + "path/filepath" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +type memHandler struct{} + +func getMemoryHandler() *memHandler { + return &memHandler{} +} + +// Apply set the specified constraints +func (c *memHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.Memory == nil { + return nil + } + return fmt.Errorf("memory apply not implemented yet") +} + +// Create the cgroup +func (c *memHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Memory) +} + +// Destroy the cgroup +func (c *memHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Memory)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *memHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var err error + usage := MemoryUsage{} + + var memoryRoot string + var limitFilename string + + if ctr.cgroup2 { + memoryRoot = filepath.Join(cgroupRoot, ctr.path) + limitFilename = "memory.max" + if usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil { + return err + } + } else { + memoryRoot = ctr.getCgroupv1Path(Memory) + limitFilename = "memory.limit_in_bytes" + if usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil { + return err + } + } + + usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) + if err != nil { + return err + } + + m.Memory = MemoryMetrics{Usage: usage} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go new file mode 100644 index 00000000000..58cb32b3ba3 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/pids.go @@ -0,0 +1,69 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +type pidHandler struct { +} + +func getPidsHandler() *pidHandler { + return &pidHandler{} +} + +// Apply set the specified constraints +func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.Pids == nil { + return nil + } + var PIDRoot string + + if ctr.cgroup2 { + PIDRoot = filepath.Join(cgroupRoot, ctr.path) + } else { + PIDRoot = ctr.getCgroupv1Path(Pids) + } + + p := filepath.Join(PIDRoot, "pids.max") + return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0644) +} + +// Create the cgroup +func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Pids) +} + +// Destroy the cgroup +func (c *pidHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Pids)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *pidHandler) 
Stat(ctr *CgroupControl, m *Metrics) error { + if ctr.path == "" { + // nothing we can do to retrieve the pids.current path + return nil + } + + var PIDRoot string + if ctr.cgroup2 { + PIDRoot = filepath.Join(cgroupRoot, ctr.path) + } else { + PIDRoot = ctr.getCgroupv1Path(Pids) + } + + current, err := readFileAsUint64(filepath.Join(PIDRoot, "pids.current")) + if err != nil { + return err + } + + m.Pids = PidsMetrics{Current: current} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go new file mode 100644 index 00000000000..92065a2d7cd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd.go @@ -0,0 +1,80 @@ +package cgroups + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" +) + +func systemdCreate(path string, c *systemdDbus.Conn) error { + slice, name := filepath.Split(path) + slice = strings.TrimSuffix(slice, "/") + + var lastError error + for i := 0; i < 2; i++ { + properties := []systemdDbus.Property{ + systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), + systemdDbus.PropWants(slice), + } + pMap := map[string]bool{ + "DefaultDependencies": false, + "MemoryAccounting": true, + "CPUAccounting": true, + "BlockIOAccounting": true, + } + if i == 0 { + pMap["Delegate"] = true + } + for k, v := range pMap { + p := systemdDbus.Property{ + Name: k, + Value: dbus.MakeVariant(v), + } + properties = append(properties, p) + } + + ch := make(chan string) + _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) + if err != nil { + lastError = err + continue + } + <-ch + return nil + } + return lastError +} + +/* + systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that + has the following license: + + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +func systemdDestroyConn(path string, c *systemdDbus.Conn) error { + name := filepath.Base(path) + + ch := make(chan string) + _, err := c.StopUnitContext(context.TODO(), name, "replace", ch) + if err != nil { + return err + } + <-ch + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go new file mode 100644 index 00000000000..749c89932d9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go @@ -0,0 +1,27 @@ +package cgroupv2 + +import ( + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + isCgroupV2Once sync.Once + isCgroupV2 bool + isCgroupV2Err error +) + +// Enabled returns whether we are running on cgroup v2 +func Enabled() (bool, error) { + isCgroupV2Once.Do(func() { + var st syscall.Statfs_t + if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + isCgroupV2, isCgroupV2Err = false, err + } else { + isCgroupV2, isCgroupV2Err = st.Type == unix.CGROUP2_SUPER_MAGIC, nil + } + }) + return isCgroupV2, isCgroupV2Err +} diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go new file mode 100644 index 00000000000..61b3653e57c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package cgroupv2 + +// Enabled returns whether we are running on cgroup v2 +func Enabled() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/common/pkg/chown/chown.go b/vendor/github.com/containers/common/pkg/chown/chown.go new file mode 100644 index 00000000000..fb7d67f33c0 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/chown/chown.go @@ -0,0 +1,65 @@ +package chown + +import ( + "os" + "os/user" + "path/filepath" + + "github.com/containers/storage/pkg/homedir" +) + +// DangerousHostPath validates if a host path is dangerous and should not be modified +func DangerousHostPath(path string) (bool, error) { + excludePaths := map[string]bool{ + "/": true, + "/bin": true, + "/boot": true, + "/dev": true, + "/etc": true, + "/etc/passwd": true, + "/etc/pki": true, + "/etc/shadow": true, + "/home": true, + "/lib": true, + "/lib64": true, + "/media": true, + "/opt": true, + "/proc": true, + "/root": true, + "/run": true, + "/sbin": true, + "/srv": true, + "/sys": true, + "/tmp": true, + "/usr": true, + "/var": true, + "/var/lib": true, + "/var/log": true, + } + + if home := homedir.Get(); home != "" { + excludePaths[home] = true + } + + if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { + if usr, err := user.Lookup(sudoUser); err == nil { + excludePaths[usr.HomeDir] = true + } + } + + absPath, err := filepath.Abs(path) + if err != nil { + return true, err + } + + realPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + return true, err + } + + if excludePaths[realPath] { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go new file mode 100644 index 00000000000..921927de4cd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go @@ -0,0 +1,66 @@ +// +build !windows + +package chown + +import ( + "os" + "path/filepath" + "syscall" + + "github.com/pkg/errors" +) + +// ChangeHostPathOwnership changes the uid and gid ownership of a directory 
or file within the host. +// This is used by the volume U flag to change source volumes ownership +func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { + // Validate if host path can be chowned + isDangerous, err := DangerousHostPath(path) + if err != nil { + return errors.Wrap(err, "failed to validate if host path is dangerous") + } + + if isDangerous { + return errors.Errorf("chowning host path %q is not allowed. You can manually `chown -R %d:%d %s`", path, uid, gid, path) + } + + // Chown host path + if recursive { + err := filepath.Walk(path, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Get current ownership + currentUID := int(f.Sys().(*syscall.Stat_t).Uid) + currentGID := int(f.Sys().(*syscall.Stat_t).Gid) + + if uid != currentUID || gid != currentGID { + return os.Lchown(filePath, uid, gid) + } + + return nil + }) + + if err != nil { + return errors.Wrap(err, "failed to chown recursively host path") + } + } else { + // Get host path info + f, err := os.Lstat(path) + if err != nil { + return errors.Wrap(err, "failed to get host path information") + } + + // Get current ownership + currentUID := int(f.Sys().(*syscall.Stat_t).Uid) + currentGID := int(f.Sys().(*syscall.Stat_t).Gid) + + if uid != currentUID || gid != currentGID { + if err := os.Lchown(path, uid, gid); err != nil { + return errors.Wrap(err, "failed to chown host path") + } + } + } + + return nil +} diff --git a/vendor/github.com/containers/common/pkg/chown/chown_windows.go b/vendor/github.com/containers/common/pkg/chown/chown_windows.go new file mode 100644 index 00000000000..0c4b8e1b5d9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/chown/chown_windows.go @@ -0,0 +1,11 @@ +package chown + +import ( + "github.com/pkg/errors" +) + +// ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host. +// This is used by the volume U flag to change source volumes ownership +func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { + return errors.New("windows not supported") +} diff --git a/vendor/github.com/containers/common/pkg/completion/completion.go b/vendor/github.com/containers/common/pkg/completion/completion.go new file mode 100644 index 00000000000..c90bf540b1b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/completion/completion.go @@ -0,0 +1,155 @@ +package completion + +import ( + "bufio" + "os" + "strings" + "unicode" + + "github.com/containers/common/pkg/capabilities" + "github.com/spf13/cobra" +) + +// FlagCompletions - hold flag completion functions to be applied later with CompleteCommandFlags() +type FlagCompletions map[string]func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) + +// CompleteCommandFlags - Add completion functions for each flagname in FlagCompletions. +func CompleteCommandFlags(cmd *cobra.Command, flags FlagCompletions) { + for flagName, completionFunc := range flags { + _ = cmd.RegisterFlagCompletionFunc(flagName, completionFunc) + } +} + +/* Autocomplete Functions for cobra ValidArgsFunction */ + +// AutocompleteNone - Block the default shell completion (no paths) +func AutocompleteNone(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteDefault - Use the default shell completion, +// allows path completion. 
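+//
+// A wiring sketch (the "pull" command name is hypothetical):
+//
+//	cmd := &cobra.Command{
+//		Use:               "pull",
+//		ValidArgsFunction: completion.AutocompleteDefault,
+//	}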
+func AutocompleteDefault(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveDefault +} + +// AutocompleteCapabilities - Autocomplete linux capabilities options. +// Used by --cap-add and --cap-drop. +func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + caps := capabilities.AllCapabilities() + + // convertCase will convert a string to lowercase only if the user input is lowercase + convertCase := func(s string) string { return s } + if len(toComplete) > 0 && unicode.IsLower(rune(toComplete[0])) { + convertCase = strings.ToLower + } + + // offset is used to trim "CAP_" if the user doesn't type CA... or ca... + offset := 0 + if !strings.HasPrefix(toComplete, convertCase("CA")) { + // setting the offset to 4 is safe since each cap starts with CAP_ + offset = 4 + } + + var completions []string + for _, cap := range caps { + completions = append(completions, convertCase(cap)[offset:]) + } + + // add ALL here which is also a valid argument + completions = append(completions, convertCase(capabilities.All)) + return completions, cobra.ShellCompDirectiveNoFileComp +} + +// autocompleteSubIDName - autocomplete the names in /etc/subuid or /etc/subgid +func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective) { + file, err := os.Open(filename) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + defer file.Close() + + var names []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + name := strings.SplitN(scanner.Text(), ":", 2)[0] + names = append(names, name) + } + if err = scanner.Err(); err != nil { + return nil, cobra.ShellCompDirectiveError + } + + return names, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteSubgidName - Autocomplete subgidname based on the names in the /etc/subgid file. +func AutocompleteSubgidName(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return autocompleteSubIDName("/etc/subgid") +} + +// AutocompleteSubuidName - Autocomplete subuidname based on the names in the /etc/subuid file. 
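+// Entries in /etc/subuid have the form "name:start:count" (for example
+// "fred:100000:65536", where "fred" is a made-up user); only the leading
+// name field is offered as a completion.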
+func AutocompleteSubuidName(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return autocompleteSubIDName("/etc/subuid")
+}
+
+// AutocompletePlatform - Autocomplete platform supported by container engines
+func AutocompletePlatform(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	completions := []string{
+		"linux/386",
+		"linux/amd64",
+		"linux/arm",
+		"linux/arm64",
+		"linux/ppc64",
+		"linux/ppc64le",
+		"linux/mips",
+		"linux/mipsle",
+		"linux/mips64",
+		"linux/mips64le",
+		"linux/riscv64",
+		"linux/s390x",
+		"windows/386",
+		"windows/amd64",
+		"windows/arm",
+	}
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteArch - Autocomplete architectures supported by container engines
+func AutocompleteArch(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	completions := []string{
+		"386",
+		"amd64",
+		"arm",
+		"arm64",
+		"ppc64",
+		"ppc64le",
+		"mips",
+		"mipsle",
+		"mips64",
+		"mips64le",
+		"riscv64",
+		"s390x",
+	}
+
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteOS - Autocomplete OS supported by container engines
+func AutocompleteOS(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	completions := []string{"linux", "windows"}
+	return completions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteJSONFormat - Autocomplete format flag option.
+// -> "json"
+func AutocompleteJSONFormat(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return []string{"json"}, cobra.ShellCompDirectiveNoFileComp
+}
+
+// AutocompleteOneArg - Autocomplete one random arg
+func AutocompleteOneArg(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	if len(args) == 1 {
+		return nil, cobra.ShellCompDirectiveDefault
+	}
+	return nil, cobra.ShellCompDirectiveNoFileComp
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
new file mode 100644
index 00000000000..dd30abcd632
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -0,0 +1,1228 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containers/common/pkg/capabilities"
+	"github.com/containers/storage/pkg/unshare"
+	units "github.com/docker/go-units"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// _configPath is the path to the containers/containers.conf
+	// inside a given config directory.
+	_configPath = "containers/containers.conf"
+	// DefaultContainersConfig holds the default containers config path
+	DefaultContainersConfig = "/usr/share/" + _configPath
+	// OverrideContainersConfig holds the default config path overridden by the root user
+	OverrideContainersConfig = "/etc/" + _configPath
+	// UserOverrideContainersConfig holds the containers config path overridden by the rootless user
+	UserOverrideContainersConfig = ".config/" + _configPath
+)
+
+// RuntimeStateStore is a constant indicating which state store implementation
+// should be used by engine
+type RuntimeStateStore int
+
+const (
+	// InvalidStateStore is an invalid state store
+	InvalidStateStore RuntimeStateStore = iota
+	// InMemoryStateStore is an in-memory state that will not persist data
+	// on containers and pods between engine instances or after system
+	// reboot
+	InMemoryStateStore RuntimeStateStore = iota
+	// SQLiteStateStore is a state backed by a SQLite database
+	// It is presently disabled
+	SQLiteStateStore RuntimeStateStore = iota
+	// BoltDBStateStore is a state backed by a BoltDB database
+	BoltDBStateStore RuntimeStateStore = iota
+)
+
+// ProxyEnv is a list of Proxy Environment variables
+var ProxyEnv = []string{
+	"http_proxy",
+	"https_proxy",
+	"ftp_proxy",
+	"no_proxy",
+	"HTTP_PROXY",
+	"HTTPS_PROXY",
+	"FTP_PROXY",
+	"NO_PROXY",
+}
+
+// Config contains configuration options for container tools
+type Config struct {
+	// Containers specify settings that configure how containers will run on the system
+	Containers ContainersConfig `toml:"containers"`
+	// Engine specifies how the container engine based on Engine will run
+	Engine EngineConfig `toml:"engine"`
+	// Machine specifies configurations of podman machine VMs
+	Machine MachineConfig `toml:"machine"`
+	// Network section defines the configuration of CNI Plugins
+	Network NetworkConfig `toml:"network"`
+	// Secret section defines configurations for the secret management
+	Secrets SecretConfig `toml:"secrets"`
+	// ConfigMap section defines configurations for the configmaps management
+	ConfigMaps ConfigMapConfig `toml:"configmaps"`
+}
+
+// ContainersConfig represents the "containers" TOML config table
+// containing global options for container tools
+type ContainersConfig struct {
+
+	// Devices to add to all containers
+	Devices []string `toml:"devices,omitempty"`
+
+	// Volumes to add to all containers
+	Volumes []string `toml:"volumes,omitempty"`
+
+	// ApparmorProfile is the apparmor profile name which is used as the
+	// default for the runtime.
+	ApparmorProfile string `toml:"apparmor_profile,omitempty"`
+
+	// Annotations to add to all containers
+	Annotations []string `toml:"annotations,omitempty"`
+
+	// Default way to create a cgroup namespace for the container
+	CgroupNS string `toml:"cgroupns,omitempty"`
+
+	// Default cgroup configuration
+	Cgroups string `toml:"cgroups,omitempty"`
+
+	// Capabilities to add to all containers.
+	DefaultCapabilities []string `toml:"default_capabilities,omitempty"`
+
+	// Sysctls to add to all containers.
+	DefaultSysctls []string `toml:"default_sysctls,omitempty"`
+
+	// DefaultUlimits specifies the default ulimits to apply to containers
+	DefaultUlimits []string `toml:"default_ulimits,omitempty"`
+
+	// DefaultMountsFile is the path to the default mounts file for testing
+	DefaultMountsFile string `toml:"-"`
+
+	// DNSServers set default DNS servers.
+	DNSServers []string `toml:"dns_servers,omitempty"`
+
+	// DNSOptions set default DNS options.
+	DNSOptions []string `toml:"dns_options,omitempty"`
+
+	// DNSSearches set default DNS search domains.
+	DNSSearches []string `toml:"dns_searches,omitempty"`
+
+	// EnableKeyring tells the container engines whether to create
+	// a kernel keyring for use within the container
+	EnableKeyring bool `toml:"keyring,omitempty"`
+
+	// EnableLabeling tells the container engines whether to use MAC
+	// Labeling to separate containers (SELinux)
+	EnableLabeling bool `toml:"label,omitempty"`
+
+	// Env is the environment variable list for container process.
+	Env []string `toml:"env,omitempty"`
+
+	// EnvHost passes all host environment variables into the container.
+	EnvHost bool `toml:"env_host,omitempty"`
+
+	// HTTPProxy is the proxy environment variable list to apply to container process
+	HTTPProxy bool `toml:"http_proxy,omitempty"`
+
+	// Init tells container runtimes whether to run init inside the
+	// container that forwards signals and reaps processes.
+	Init bool `toml:"init,omitempty"`
+
+	// InitPath is the path for init to run if the Init bool is enabled
+	InitPath string `toml:"init_path,omitempty"`
+
+	// IPCNS indicates how to create an IPC namespace for the container
+	IPCNS string `toml:"ipcns,omitempty"`
+
+	// LogDriver for the container. For example: k8s-file and journald
+	LogDriver string `toml:"log_driver,omitempty"`
+
+	// LogSizeMax is the maximum number of bytes after which the log file
+	// will be truncated. It can be expressed as a human-friendly string
+	// that is parsed to bytes.
+	// Negative values indicate that the log file won't be truncated.
+	LogSizeMax int64 `toml:"log_size_max,omitempty,omitzero"`
+
+	// Specifies default format tag for container log messages.
+	// This is useful for creating a specific tag for container log messages.
+	// Containers logs default to truncated container ID as a tag.
+	LogTag string `toml:"log_tag,omitempty"`
+
+	// NetNS indicates how to create a network namespace for the container
+	NetNS string `toml:"netns,omitempty"`
+
+	// NoHosts tells container engine whether to create its own /etc/hosts
+	NoHosts bool `toml:"no_hosts,omitempty"`
+
+	// PidsLimit is the number of processes each container is restricted to
+	// by the cgroup process number controller.
+	PidsLimit int64 `toml:"pids_limit,omitempty,omitzero"`
+
+	// PidNS indicates how to create a pid namespace for the container
+	PidNS string `toml:"pidns,omitempty"`
+
+	// Copy the content from the underlying image into the newly created
+	// volume when the container is created instead of when it is started.
+	// If false, the container engine will not copy the content until
+	// the container is started. Setting it to true may have negative
+	// performance implications.
+	PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"`
+
+	// SeccompProfile is the seccomp.json profile path which is used as the
+	// default for the runtime.
+	SeccompProfile string `toml:"seccomp_profile,omitempty"`
+
+	// ShmSize holds the size of /dev/shm.
+	ShmSize string `toml:"shm_size,omitempty"`
+
+	// TZ sets the timezone inside the container
+	TZ string `toml:"tz,omitempty"`
+
+	// Umask is the umask inside the container.
+	Umask string `toml:"umask,omitempty"`
+
+	// UTSNS indicates how to create a UTS namespace for the container
+	UTSNS string `toml:"utsns,omitempty"`
+
+	// UserNS indicates how to create a User namespace for the container
+	UserNS string `toml:"userns,omitempty"`
+
+	// UserNSSize how many UIDs to allocate for automatically created UserNS
+	UserNSSize int `toml:"userns_size,omitempty,omitzero"`
+}
+
+// EngineConfig contains configuration options used to set up an engine runtime
+type EngineConfig struct {
+	// CgroupCheck indicates the configuration has been rewritten after an
+	// upgrade to Fedora 31 to change the default OCI runtime for cgroup v2.
+	CgroupCheck bool `toml:"cgroup_check,omitempty"`
+
+	// CgroupManager is the cgroup manager to use. Valid values are "cgroupfs"
+	// and "systemd".
+	CgroupManager string `toml:"cgroup_manager,omitempty"`
+
+	// NOTE: when changing this struct, make sure to update (*Config).Merge().
+
+	// ConmonEnvVars are environment variables to pass to the Conmon binary
+	// when it is launched.
+	ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"`
+
+	// ConmonPath is the path to the Conmon binary used for managing containers.
+	// The first path pointing to a valid file will be used.
+	ConmonPath []string `toml:"conmon_path,omitempty"`
+
+	// CompatAPIEnforceDockerHub enforces using docker.io for completing
+	// short names in Podman's compatibility REST API. Note that this will
+	// ignore unqualified-search-registries and short-name aliases defined
+	// in containers-registries.conf(5).
+	CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"`
+
+	// DetachKeys is the sequence of keys used to detach a container.
+	DetachKeys string `toml:"detach_keys,omitempty"`
+
+	// EnablePortReservation determines whether engine will reserve ports on the
+	// host when they are forwarded to containers. When enabled, when ports are
+	// forwarded to containers, they are held open by conmon as long as the
+	// container is running, ensuring that they cannot be reused by other
+	// programs on the host. However, this can cause significant memory usage if
+	// a container has many ports forwarded to it. Disabling this can save
+	// memory.
+	EnablePortReservation bool `toml:"enable_port_reservation,omitempty"`
+
+	// Environment variables to be used when running the container engine
+	// (e.g., Podman, Buildah). For example "http_proxy=internal.proxy.company.com"
+	Env []string `toml:"env,omitempty"`
+
+	// EventsLogFilePath is where the events log is stored.
+	EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
+
+	// EventsLogger determines where events should be logged.
+	EventsLogger string `toml:"events_logger,omitempty"`
+
+	// graphRoot internal stores the location of the graphroot
+	graphRoot string
+
+	// HelperBinariesDir is a list of directories which are used to search for
+	// helper binaries.
+	HelperBinariesDir []string `toml:"helper_binaries_dir"`
+
+	// HooksDir holds paths to the directories containing hooks
+	// configuration files. When the same filename is present in
+	// multiple directories, the file in the directory listed last in
+	// this slice takes precedence.
+	HooksDir []string `toml:"hooks_dir,omitempty"`
+
+	// ImageBuildFormat (DEPRECATED) indicates the default image format for
+	// building container images. Use ImageDefaultFormat instead.
+	ImageBuildFormat string `toml:"image_build_format,omitempty"`
+
+	// ImageDefaultTransport is the default transport method used to fetch
+	// images.
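+	// (typically "docker://" in shipped default configurations).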
+ ImageDefaultTransport string `toml:"image_default_transport,omitempty"` + + // ImageParallelCopies indicates the maximum number of image layers + // to be copied simultaneously. If this is zero, container engines + // will fall back to containers/image defaults. + ImageParallelCopies uint `toml:"image_parallel_copies,omitempty,omitzero"` + + // ImageDefaultFormat specified the manifest Type (oci, v2s2, or v2s1) + // to use when pulling, pushing, building container images. By default + // image pulled and pushed match the format of the source image. + // Building/committing defaults to OCI. + ImageDefaultFormat string `toml:"image_default_format,omitempty"` + + // InfraCommand is the command run to start up a pod infra container. + InfraCommand string `toml:"infra_command,omitempty"` + + // InfraImage is the image a pod infra container will use to manage + // namespaces. + InfraImage string `toml:"infra_image,omitempty"` + + // InitPath is the path to the container-init binary. + InitPath string `toml:"init_path,omitempty"` + + // LockType is the type of locking to use. + LockType string `toml:"lock_type,omitempty"` + + // MachineEnabled indicates if Podman is running in a podman-machine VM + MachineEnabled bool `toml:"machine_enabled,omitempty"` + + // MultiImageArchive - if true, the container engine allows for storing + // archives (e.g., of the docker-archive transport) with multiple + // images. By default, Podman creates single-image archives. + MultiImageArchive bool `toml:"multi_image_archive,omitempty"` + + // Namespace is the engine namespace to use. Namespaces are used to create + // scopes to separate containers and pods in the state. When namespace is + // set, engine will only view containers and pods in the same namespace. All + // containers and pods created will default to the namespace set here. A + // namespace of "", the empty string, is equivalent to no namespace, and all + // containers and pods will be visible. The default namespace is "". + Namespace string `toml:"namespace,omitempty"` + + // NetworkCmdPath is the path to the slirp4netns binary. + NetworkCmdPath string `toml:"network_cmd_path,omitempty"` + + // NetworkCmdOptions is the default options to pass to the slirp4netns binary. + // For example "allow_host_loopback=true" + NetworkCmdOptions []string `toml:"network_cmd_options,omitempty"` + + // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime. + NoPivotRoot bool `toml:"no_pivot_root,omitempty"` + + // NumLocks is the number of locks to make available for containers and + // pods. + NumLocks uint32 `toml:"num_locks,omitempty,omitzero"` + + // OCIRuntime is the OCI runtime to use. + OCIRuntime string `toml:"runtime,omitempty"` + + // OCIRuntimes are the set of configured OCI runtimes (default is runc). + OCIRuntimes map[string][]string `toml:"runtimes,omitempty"` + + // PullPolicy determines whether to pull image before creating or running a container + // default is "missing" + PullPolicy string `toml:"pull_policy,omitempty"` + + // Indicates whether the application should be running in Remote mode + Remote bool `toml:"remote,omitempty"` + + // RemoteURI is deprecated, see ActiveService + // RemoteURI containers connection information used to connect to remote system. 
+ RemoteURI string `toml:"remote_uri,omitempty"` + + // RemoteIdentity is deprecated, ServiceDestinations + // RemoteIdentity key file for RemoteURI + RemoteIdentity string `toml:"remote_identity,omitempty"` + + // ActiveService index to Destinations added v2.0.3 + ActiveService string `toml:"active_service,omitempty"` + + // ServiceDestinations mapped by service Names + ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"` + + // RuntimePath is the path to OCI runtime binary for launching containers. + // The first path pointing to a valid file will be used This is used only + // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be + // backward compatible with older versions of Podman. + RuntimePath []string `toml:"runtime_path,omitempty"` + + // RuntimeSupportsJSON is the list of the OCI runtimes that support + // --format=json. + RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"` + + // RuntimeSupportsNoCgroups is a list of OCI runtimes that support + // running containers without CGroups. + RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroup,omitempty"` + + // RuntimeSupportsKVM is a list of OCI runtimes that support + // KVM separation for containers. + RuntimeSupportsKVM []string `toml:"runtime_supports_kvm,omitempty"` + + // SetOptions contains a subset of config options. It's used to indicate if + // a given option has either been set by the user or by the parsed + // configuration file. If not, the corresponding option might be + // overwritten by values from the database. This behavior guarantees + // backwards compat with older version of libpod and Podman. + SetOptions + + // SignaturePolicyPath is the path to a signature policy to use for + // validating images. If left empty, the containers/image default signature + // policy will be used. + SignaturePolicyPath string `toml:"-"` + + // SDNotify tells container engine to allow containers to notify the host systemd of + // readiness using the SD_NOTIFY mechanism. + SDNotify bool `toml:"-"` + + // StateType is the type of the backing state store. Avoid using multiple + // values for this with the same containers/storage configuration on the + // same system. Different state types do not interact, and each will see a + // separate set of containers, which may cause conflicts in + // containers/storage. As such this is not exposed via the config file. + StateType RuntimeStateStore `toml:"-"` + + // ServiceTimeout is the number of seconds to wait without a connection + // before the `podman system service` times out and exits + ServiceTimeout uint `toml:"service_timeout,omitempty,omitzero"` + + // StaticDir is the path to a persistent directory to store container + // files. + StaticDir string `toml:"static_dir,omitempty"` + + // StopTimeout is the number of seconds to wait for container to exit + // before sending kill signal. + StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"` + + // ImageCopyTmpDir is the default location for storing temporary + // container image content, Can be overridden with the TMPDIR + // environment variable. If you specify "storage", then the + // location of the container/storage tmp directory will be used. + ImageCopyTmpDir string `toml:"image_copy_tmp_dir,omitempty"` + + // TmpDir is the path to a temporary directory to store per-boot container + // files. Must be stored in a tmpfs. + TmpDir string `toml:"tmp_dir,omitempty"` + + // VolumePath is the default location that named volumes will be created + // under. 
This convention is followed by the default volume driver, but + // may not be by other drivers. + VolumePath string `toml:"volume_path,omitempty"` + + // VolumePlugins is a set of plugins that can be used as the backend for + // Podman named volumes. Each volume is specified as a name (what Podman + // will refer to the plugin as) mapped to a path, which must point to a + // Unix socket that conforms to the Volume Plugin specification. + VolumePlugins map[string]string `toml:"volume_plugins,omitempty"` + + // ChownCopiedFiles tells the container engine whether to chown files copied + // into a container to the container's primary uid/gid. + ChownCopiedFiles bool `toml:"chown_copied_files,omitempty"` + + // CompressionFormat is the compression format used to compress image layers. + CompressionFormat string `toml:"compression_format,omitempty"` +} + +// SetOptions contains a subset of options in a Config. It's used to indicate if +// a given option has either been set by the user or by a parsed engine +// configuration file. If not, the corresponding option might be overwritten by +// values from the database. This behavior guarantees backwards compat with +// older version of libpod and Podman. +type SetOptions struct { + // StorageConfigRunRootSet indicates if the RunRoot has been explicitly set + // by the config or by the user. It's required to guarantee backwards + // compatibility with older versions of libpod for which we must query the + // database configuration. Not included in the on-disk config. + StorageConfigRunRootSet bool `toml:"-"` + + // StorageConfigGraphRootSet indicates if the RunRoot has been explicitly + // set by the config or by the user. It's required to guarantee backwards + // compatibility with older versions of libpod for which we must query the + // database configuration. Not included in the on-disk config. + StorageConfigGraphRootSet bool `toml:"-"` + + // StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been + // explicitly set by the config or by the user. It's required to guarantee + // backwards compatibility with older versions of libpod for which we must + // query the database configuration. Not included in the on-disk config. + StorageConfigGraphDriverNameSet bool `toml:"-"` + + // StaticDirSet indicates if the StaticDir has been explicitly set by the + // config or by the user. It's required to guarantee backwards compatibility + // with older versions of libpod for which we must query the database + // configuration. Not included in the on-disk config. + StaticDirSet bool `toml:"-"` + + // VolumePathSet indicates if the VolumePath has been explicitly set by the + // config or by the user. It's required to guarantee backwards compatibility + // with older versions of libpod for which we must query the database + // configuration. Not included in the on-disk config. + VolumePathSet bool `toml:"-"` + + // TmpDirSet indicates if the TmpDir has been explicitly set by the config + // or by the user. It's required to guarantee backwards compatibility with + // older versions of libpod for which we must query the database + // configuration. Not included in the on-disk config. + TmpDirSet bool `toml:"-"` +} + +// NetworkConfig represents the "network" TOML config table +type NetworkConfig struct { + // NetworkBackend determines what backend should be used for Podman's + // networking. + NetworkBackend string `toml:"network_backend,omitempty"` + + // CNIPluginDirs is where CNI plugin binaries are stored. 
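+	// Commonly /opt/cni/bin and /usr/libexec/cni, though the exact
+	// defaults are distribution-dependent.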
+    CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"`
+
+    // DefaultNetwork is the network name of the default CNI network
+    // to attach pods to.
+    DefaultNetwork string `toml:"default_network,omitempty"`
+
+    // DefaultSubnet is the subnet to be used for the default CNI network.
+    // If a network with the name given in DefaultNetwork is not present,
+    // a new network using this subnet will be created.
+    // Must be a valid IPv4 CIDR block.
+    DefaultSubnet string `toml:"default_subnet,omitempty"`
+
+    // NetworkConfigDir is where CNI network configuration files are stored.
+    NetworkConfigDir string `toml:"network_config_dir,omitempty"`
+}
+
+// SecretConfig represents the "secret" TOML config table
+type SecretConfig struct {
+    // Driver specifies the secret driver to use.
+    // Current valid values:
+    //  * file
+    //  * pass
+    Driver string `toml:"driver,omitempty"`
+    // Opts contains driver-specific options
+    Opts map[string]string `toml:"opts,omitempty"`
+}
+
+// ConfigMapConfig represents the "configmap" TOML config table
+type ConfigMapConfig struct {
+    // Driver specifies the configmap driver to use.
+    // Current valid values:
+    //  * file
+    //  * pass
+    Driver string `toml:"driver,omitempty"`
+    // Opts contains driver-specific options
+    Opts map[string]string `toml:"opts,omitempty"`
+}
+
+// MachineConfig represents the "machine" TOML config table
+type MachineConfig struct {
+    // CPUs is the number of CPUs a machine is created with.
+    CPUs uint64 `toml:"cpus,omitempty,omitzero"`
+    // DiskSize is the size of the disk in GB created when init-ing a podman-machine VM
+    DiskSize uint64 `toml:"disk_size,omitempty,omitzero"`
+    // Image is the image used when init-ing a podman-machine VM
+    Image string `toml:"image,omitempty"`
+    // Memory in MB a machine is created with.
+    Memory uint64 `toml:"memory,omitempty,omitzero"`
+    // User is the username to use and create on the podman machine OS for
+    // rootless container access.
+    User string `toml:"user,omitempty"`
+}
+
+// Destination represents a destination for the remote service
+type Destination struct {
+    // URI, required. Example: ssh://root@example.com:22/run/podman/podman.sock
+    URI string `toml:"uri"`
+
+    // Identity file with ssh key, optional
+    Identity string `toml:"identity,omitempty"`
+}
+
+// NewConfig creates a new Config. It starts with an empty config and, if
+// specified, merges the config at the `userConfigPath` path. Depending on
+// whether we're running as root or rootless, we then merge the system
+// configuration, followed by merging the default config (hard-coded defaults
+// in memory).
+// Note that the OCI runtime is hard-set to `crun` if we're running on a system
+// with cgroup v2. Other OCI runtimes do not yet support cgroup v2. This
+// might change in the future.
+func NewConfig(userConfigPath string) (*Config, error) {
+    // Generate the default config for the system
+    config, err := DefaultConfig()
+    if err != nil {
+        return nil, err
+    }
+
+    // Now, gather the system configs and merge them as needed.
+    configs, err := systemConfigs()
+    if err != nil {
+        return nil, errors.Wrap(err, "finding config on system")
+    }
+    for _, path := range configs {
+        // Merge changes from later configs into the earlier ones. Each
+        // config file only overrides the fields it actually specifies.
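+        // For example, a later file containing only `log_driver` changes
+        // just that field and leaves every other merged field untouched.
+        // Illustrative sketch of the layering (paths are examples only):
+        //
+        //	base, _ := DefaultConfig()
+        //	_ = readConfigFromFile("/usr/share/containers/containers.conf", base)
+        //	_ = readConfigFromFile("/etc/containers/containers.conf", base)
+        //	// base now holds the defaults overridden by both files, in order.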
+        if err = readConfigFromFile(path, config); err != nil {
+            return nil, errors.Wrapf(err, "reading system config %q", path)
+        }
+        logrus.Debugf("Merged system config %q", path)
+        logrus.Tracef("%+v", config)
+    }
+
+    // If the caller specified a config path to use, then we read it to
+    // override the system defaults.
+    if userConfigPath != "" {
+        var err error
+        // readConfigFromFile reads in the container config from the specified
+        // file and then merges the changes into the current defaults.
+        if err = readConfigFromFile(userConfigPath, config); err != nil {
+            return nil, errors.Wrapf(err, "reading user config %q", userConfigPath)
+        }
+        logrus.Debugf("Merged user config %q", userConfigPath)
+        logrus.Tracef("%+v", config)
+    }
+    config.addCAPPrefix()
+
+    if err := config.Validate(); err != nil {
+        return nil, err
+    }
+
+    if err := config.setupEnv(); err != nil {
+        return nil, err
+    }
+
+    return config, nil
+}
+
+// readConfigFromFile reads the specified config file at `path` and attempts to
+// unmarshal its content into a Config. The config param holds the previous
+// defaults: if the file at path only specifies a few fields, the defaults from
+// the config parameter will be used for all other fields.
+func readConfigFromFile(path string, config *Config) error {
+    logrus.Tracef("Reading configuration file %q", path)
+    meta, err := toml.DecodeFile(path, config)
+    if err != nil {
+        return errors.Wrapf(err, "decode configuration %v", path)
+    }
+    keys := meta.Undecoded()
+    if len(keys) > 0 {
+        logrus.Debugf("Failed to decode the keys %q from %q.", keys, path)
+    }
+
+    return nil
+}
+
+// addConfigs will search one level in the config dirPath for config files.
+// If the dirPath does not exist, addConfigs will return nil.
+func addConfigs(dirPath string, configs []string) ([]string, error) {
+    newConfigs := []string{}
+
+    err := filepath.Walk(dirPath,
+        // WalkFunc to read additional configs
+        func(path string, info os.FileInfo, err error) error {
+            switch {
+            case err != nil:
+                // return error (could be a permission problem)
+                return err
+            case info == nil:
+                // this should only happen when err != nil, but let's be sure
+                return nil
+            case info.IsDir():
+                if path != dirPath {
+                    // make sure not to recurse into sub-directories
+                    return filepath.SkipDir
+                }
+                // ignore directories
+                return nil
+            default:
+                // only add *.conf files
+                if strings.HasSuffix(path, ".conf") {
+                    newConfigs = append(newConfigs, path)
+                }
+                return nil
+            }
+        },
+    )
+    if os.IsNotExist(err) {
+        err = nil
+    }
+    sort.Strings(newConfigs)
+    return append(configs, newConfigs...), err
+}
+
+// systemConfigs returns the list of configuration files, if they exist, in
+// order of hierarchy. The files are read in order, and each new file can/will
+// override the settings of the previous files.
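+// Setting CONTAINERS_CONF short-circuits the whole hierarchy, which is handy
+// in tests (sketch; the path is illustrative):
+//
+//	os.Setenv("CONTAINERS_CONF", "/tmp/test-containers.conf")
+//	cfg, err := config.NewConfig("")
+//	// only the file above is merged over the built-in defaults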
+func systemConfigs() ([]string, error) {
+    var err error
+    configs := []string{}
+    path := os.Getenv("CONTAINERS_CONF")
+    if path != "" {
+        if _, err := os.Stat(path); err != nil {
+            return nil, errors.Wrap(err, "CONTAINERS_CONF file")
+        }
+        return append(configs, path), nil
+    }
+    if _, err := os.Stat(DefaultContainersConfig); err == nil {
+        configs = append(configs, DefaultContainersConfig)
+    }
+    if _, err := os.Stat(OverrideContainersConfig); err == nil {
+        configs = append(configs, OverrideContainersConfig)
+    }
+    configs, err = addConfigs(OverrideContainersConfig+".d", configs)
+    if err != nil {
+        return nil, err
+    }
+
+    path, err = ifRootlessConfigPath()
+    if err != nil {
+        return nil, err
+    }
+    if path != "" {
+        if _, err := os.Stat(path); err == nil {
+            configs = append(configs, path)
+        }
+        configs, err = addConfigs(path+".d", configs)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return configs, nil
+}
+
+// CheckCgroupsAndAdjustConfig checks if we're running rootless with the systemd
+// cgroup manager. In case the user session isn't available, we switch the
+// cgroup manager to cgroupfs. Note, this only applies to rootless.
+func (c *Config) CheckCgroupsAndAdjustConfig() {
+    if !unshare.IsRootless() || c.Engine.CgroupManager != SystemdCgroupsManager {
+        return
+    }
+
+    session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+    hasSession := session != ""
+    if hasSession {
+        for _, part := range strings.Split(session, ",") {
+            if strings.HasPrefix(part, "unix:path=") {
+                _, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
+                hasSession = err == nil
+                break
+            }
+        }
+    }
+
+    if !hasSession && unshare.GetRootlessUID() != 0 {
+        logrus.Warningf("The cgroupv2 manager is set to systemd but there is no systemd user session available")
+        logrus.Warningf("For using systemd, you may need to log in using a user session")
+        logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", unshare.GetRootlessUID())
+        logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
+        c.Engine.CgroupManager = CgroupfsCgroupsManager
+    }
+}
+
+func (c *Config) addCAPPrefix() {
+    toCAPPrefixed := func(cap string) string {
+        if !strings.HasPrefix(strings.ToLower(cap), "cap_") {
+            return "CAP_" + strings.ToUpper(cap)
+        }
+        return cap
+    }
+    for i, cap := range c.Containers.DefaultCapabilities {
+        c.Containers.DefaultCapabilities[i] = toCAPPrefixed(cap)
+    }
+}
+
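+// addCAPPrefix normalization, illustrated (sketch): lower-case names from a
+// config file are folded to the canonical CAP_* form, while already-prefixed
+// names pass through unchanged.
+//
+//	cfg.Containers.DefaultCapabilities = []string{"chown", "CAP_KILL"}
+//	cfg.addCAPPrefix()
+//	// -> []string{"CAP_CHOWN", "CAP_KILL"}
+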
+// Validate is the main entry point for library configuration validation.
+func (c *Config) Validate() error {
+    if err := c.Containers.Validate(); err != nil {
+        return errors.Wrap(err, "validating containers config")
+    }
+
+    if !c.Containers.EnableLabeling {
+        selinux.SetDisabled()
+    }
+
+    if err := c.Engine.Validate(); err != nil {
+        return errors.Wrap(err, "validating engine configs")
+    }
+
+    if err := c.Network.Validate(); err != nil {
+        return errors.Wrap(err, "validating network configs")
+    }
+
+    return nil
+}
+
+func (c *EngineConfig) findRuntime() string {
+    // Search for crun first, followed by runc, kata, and runsc
+    for _, name := range []string{"crun", "runc", "kata", "runsc"} {
+        for _, v := range c.OCIRuntimes[name] {
+            if _, err := os.Stat(v); err == nil {
+                return name
+            }
+        }
+        if path, err := exec.LookPath(name); err == nil {
+            logrus.Debugf("Found default OCI runtime %s path via PATH environment variable", path)
+            return name
+        }
+    }
+    return ""
+}
+
+// Validate is the main entry point for Engine configuration validation.
+// It returns an `error` on validation failure, otherwise `nil`.
+func (c *EngineConfig) Validate() error {
+    if err := c.validatePaths(); err != nil {
+        return err
+    }
+
+    // Check if the pullPolicy from containers.conf is valid;
+    // if it is invalid, return the error.
+    pullPolicy := strings.ToLower(c.PullPolicy)
+    if _, err := ValidatePullPolicy(pullPolicy); err != nil {
+        return errors.Wrapf(err, "invalid pull type from containers.conf %q", c.PullPolicy)
+    }
+    return nil
+}
+
+// Validate is the main entry point for containers configuration validation.
+// It returns an `error` on validation failure, otherwise `nil`.
+func (c *ContainersConfig) Validate() error {
+    if err := c.validateUlimits(); err != nil {
+        return err
+    }
+
+    if err := c.validateDevices(); err != nil {
+        return err
+    }
+
+    if err := c.validateTZ(); err != nil {
+        return err
+    }
+
+    if err := c.validateUmask(); err != nil {
+        return err
+    }
+
+    if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
+        return errors.Errorf("log size max should be negative or >= %d", OCIBufSize)
+    }
+
+    if _, err := units.FromHumanSize(c.ShmSize); err != nil {
+        return errors.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
+    }
+
+    return nil
+}
+
+// Validate is the main entry point for network configuration validation.
+// It returns an `error` on validation failure, otherwise `nil`.
+func (c *NetworkConfig) Validate() error {
+    if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) {
+        return nil
+    }
+
+    for _, pluginDir := range c.CNIPluginDirs {
+        if err := isDirectory(pluginDir); err == nil {
+            return nil
+        }
+    }
+
+    return errors.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ","))
+}
+
+// FindConmon iterates over (*Config).ConmonPath and returns the path
+// to the first (version-)matching conmon binary. If none is found, we try
+// a path lookup of "conmon".
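+// Typical use (sketch; error handling elided):
+//
+//	conmonPath, err := cfg.FindConmon()
+//	// err wraps ErrConmonOutdated if only outdated binaries were found,
+//	// or ErrInvalidArg if no candidate exists at all.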
+func (c *Config) FindConmon() (string, error) { + foundOutdatedConmon := false + for _, path := range c.Engine.ConmonPath { + stat, err := os.Stat(path) + if err != nil { + continue + } + if stat.IsDir() { + continue + } + if err := probeConmon(path); err != nil { + logrus.Warnf("Conmon at %s invalid: %v", path, err) + foundOutdatedConmon = true + continue + } + logrus.Debugf("Using conmon: %q", path) + return path, nil + } + + // Search the $PATH as last fallback + if path, err := exec.LookPath("conmon"); err == nil { + if err := probeConmon(path); err != nil { + logrus.Warnf("Conmon at %s is invalid: %v", path, err) + foundOutdatedConmon = true + } else { + logrus.Debugf("Using conmon from $PATH: %q", path) + return path, nil + } + } + + if foundOutdatedConmon { + return "", errors.Wrapf(ErrConmonOutdated, + "please update to v%d.%d.%d or later", + _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion) + } + + return "", errors.Wrapf(ErrInvalidArg, + "could not find a working conmon binary (configured options: %v)", + c.Engine.ConmonPath) +} + +// GetDefaultEnv returns the environment variables for the container. +// It will check the HTTPProxy and HostEnv booleans and add the appropriate +// environment variables to the container. +func (c *Config) GetDefaultEnv() []string { + return c.GetDefaultEnvEx(c.Containers.EnvHost, c.Containers.HTTPProxy) +} + +// GetDefaultEnvEx returns the environment variables for the container. +// It will check the HTTPProxy and HostEnv boolean parameters and return the appropriate +// environment variables for the container. +func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { + var env []string + if envHost { + env = append(env, os.Environ()...) + } else if httpProxy { + for _, p := range ProxyEnv { + if val, ok := os.LookupEnv(p); ok { + env = append(env, fmt.Sprintf("%s=%s", p, val)) + } + } + } + return append(env, c.Containers.Env...) 
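+    // Note: with envHost=true the entire host environment is forwarded and
+    // httpProxy is ignored; with both false, only the entries from
+    // containers.conf's env list (c.Containers.Env) are returned.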
+}
+
+// Capabilities returns the capabilities parsed from the Add and Drop
+// capability lists, merged with the default capabilities for the container.
+func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) {
+    userNotRoot := func(user string) bool {
+        if user == "" || user == "root" || user == "0" {
+            return false
+        }
+        return true
+    }
+
+    defaultCapabilities := c.Containers.DefaultCapabilities
+    if userNotRoot(user) {
+        defaultCapabilities = []string{}
+    }
+
+    return capabilities.MergeCapabilities(defaultCapabilities, addCapabilities, dropCapabilities)
+}
+
+// Device parses a device mapping string to a src, dest & permissions string.
+// Valid values for device look like:
+//	'/dev/sdc'
+//	'/dev/sdc:/dev/xvdc'
+//	'/dev/sdc:/dev/xvdc:rwm'
+//	'/dev/sdc:rm'
+func Device(device string) (src, dst, permissions string, err error) {
+    permissions = "rwm"
+    split := strings.Split(device, ":")
+    switch len(split) {
+    case 3:
+        if !IsValidDeviceMode(split[2]) {
+            return "", "", "", errors.Errorf("invalid device mode: %s", split[2])
+        }
+        permissions = split[2]
+        fallthrough
+    case 2:
+        if IsValidDeviceMode(split[1]) {
+            permissions = split[1]
+        } else {
+            if split[1] == "" || split[1][0] != '/' {
+                return "", "", "", errors.Errorf("invalid device mode: %s", split[1])
+            }
+            dst = split[1]
+        }
+        fallthrough
+    case 1:
+        if !strings.HasPrefix(split[0], "/dev/") {
+            return "", "", "", errors.Errorf("invalid device mode: %s", split[0])
+        }
+        src = split[0]
+    default:
+        return "", "", "", errors.Errorf("invalid device specification: %s", device)
+    }
+
+    if dst == "" {
+        dst = src
+    }
+    return src, dst, permissions, nil
+}
+
+// IsValidDeviceMode checks if the mode for a device is valid or not.
+// A valid mode is a composition of r (read), w (write), and m (mknod).
+func IsValidDeviceMode(mode string) bool {
+    var legalDeviceMode = map[rune]bool{
+        'r': true,
+        'w': true,
+        'm': true,
+    }
+    if mode == "" {
+        return false
+    }
+    for _, c := range mode {
+        if !legalDeviceMode[c] {
+            return false
+        }
+        legalDeviceMode[c] = false
+    }
+    return true
+}
+
+// resolveHomeDir converts a path referencing the home directory via "~"
+// to an absolute path
+func resolveHomeDir(path string) (string, error) {
+    // check if the path references the home dir to avoid work
+    // don't use strings.HasPrefix(path, "~") as this doesn't match "~" alone
+    // use strings.HasPrefix(...) to not match "something/~/something"
+    if !(path == "~" || strings.HasPrefix(path, "~/")) {
+        // path does not reference home dir -> nothing to do
+        return path, nil
+    }
+
+    // only get HomeDir when necessary
+    home, err := unshare.HomeDir()
+    if err != nil {
+        return "", err
+    }
+
+    // replace the first "~" (start of path) with the HomeDir to resolve "~"
+    return strings.Replace(path, "~", home, 1), nil
+}
+
+func rootlessConfigPath() (string, error) {
+    if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
+        return filepath.Join(configHome, _configPath), nil
+    }
+    home, err := unshare.HomeDir()
+    if err != nil {
+        return "", err
+    }
+
+    return filepath.Join(home, UserOverrideContainersConfig), nil
+}
+
+func stringsEq(a, b []string) bool {
+    if len(a) != len(b) {
+        return false
+    }
+
+    for i := range a {
+        if a[i] != b[i] {
+            return false
+        }
+    }
+
+    return true
+}
+
+var (
+    configErr   error
+    configMutex sync.Mutex
+    config      *Config
+)
+
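+// A typical call site for the cached accessor below looks like this
+// (sketch; error handling elided):
+//
+//	cfg, err := config.Default()
+//	if err != nil {
+//		return err
+//	}
+//	_ = cfg.Engine.CgroupManager // e.g. "systemd"
+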
+// Default returns the default container config.
+// Configuration is read from the following files, in order:
+// * /usr/share/containers/containers.conf
+// * /etc/containers/containers.conf
+// * $HOME/.config/containers/containers.conf # When run in rootless mode
+// Fields in the latter files override defaults set in previous files and the
+// built-in default config.
+// None of these files are required, and not all fields need to be specified
+// in each file; only the fields you want to override.
+// The system default config files can be overridden using the
+// CONTAINERS_CONF environment variable. This is usually done for testing.
+func Default() (*Config, error) {
+    configMutex.Lock()
+    defer configMutex.Unlock()
+    if config != nil || configErr != nil {
+        return config, configErr
+    }
+    return defConfig()
+}
+
+func defConfig() (*Config, error) {
+    config, configErr = NewConfig("")
+    return config, configErr
+}
+
+func Path() string {
+    if path := os.Getenv("CONTAINERS_CONF"); path != "" {
+        return path
+    }
+    if unshare.IsRootless() {
+        if rpath, err := rootlessConfigPath(); err == nil {
+            return rpath
+        }
+        return "$HOME/" + UserOverrideContainersConfig
+    }
+    return OverrideContainersConfig
+}
+
+// ReadCustomConfig reads the custom config and generates a config based on it
+// alone. If the custom config file does not exist, the function returns an
+// empty config.
+func ReadCustomConfig() (*Config, error) {
+    path, err := customConfigFile()
+    if err != nil {
+        return nil, err
+    }
+    newConfig := &Config{}
+    if _, err := os.Stat(path); err == nil {
+        if err := readConfigFromFile(path, newConfig); err != nil {
+            return nil, err
+        }
+    } else {
+        if !os.IsNotExist(err) {
+            return nil, err
+        }
+    }
+    return newConfig, nil
+}
+
+// Write writes the configuration to the default file
+func (c *Config) Write() error {
+    var err error
+    path, err := customConfigFile()
+    if err != nil {
+        return err
+    }
+    if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+        return err
+    }
+    configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
+    if err != nil {
+        return err
+    }
+    defer configFile.Close()
+    enc := toml.NewEncoder(configFile)
+    if err := enc.Encode(c); err != nil {
+        return err
+    }
+    return nil
+}
+
+// Reload cleans the cached config and reloads the configuration from the
+// containers.conf files. This function is meant to be used for long-running
+// processes that need to pick up changes made to the cached containers.conf
+// files.
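+// Since Reload takes the same mutex as Default, it is safe to call from
+// concurrent goroutines (sketch):
+//
+//	cfg, _ := config.Default() // cached on first call
+//	// ... containers.conf is edited on disk ...
+//	cfg, _ = config.Reload()   // re-reads and re-caches the config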
+func Reload() (*Config, error) { + configMutex.Lock() + defer configMutex.Unlock() + return defConfig() +} + +func (c *Config) ActiveDestination() (uri, identity string, err error) { + if uri, found := os.LookupEnv("CONTAINER_HOST"); found { + if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found { + identity = v + } + return uri, identity, nil + } + connEnv := os.Getenv("CONTAINER_CONNECTION") + switch { + case connEnv != "": + d, found := c.Engine.ServiceDestinations[connEnv] + if !found { + return "", "", errors.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) + } + return d.URI, d.Identity, nil + + case c.Engine.ActiveService != "": + d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService] + if !found { + return "", "", errors.Errorf("%q service destination not found", c.Engine.ActiveService) + } + return d.URI, d.Identity, nil + case c.Engine.RemoteURI != "": + return c.Engine.RemoteURI, c.Engine.RemoteIdentity, nil + } + return "", "", errors.New("no service destination configured") +} + +// FindHelperBinary will search the given binary name in the configured directories. +// If searchPATH is set to true it will also search in $PATH. +func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { + dir_list := c.Engine.HelperBinariesDir + + // If set, search this directory first. This is used in testing. + if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { + dir_list = append([]string{dir}, dir_list...) + } + + for _, path := range dir_list { + fullpath := filepath.Join(path, name) + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + if searchPATH { + return exec.LookPath(name) + } + configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." + if len(c.Engine.HelperBinariesDir) == 0 { + return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) + } + return "", errors.Errorf("could not find %q in one of %v. 
%s", name, c.Engine.HelperBinariesDir, configHint) +} + +// ImageCopyTmpDir default directory to store temporary image files during copy +func (c *Config) ImageCopyTmpDir() (string, error) { + if path, found := os.LookupEnv("TMPDIR"); found { + return path, nil + } + switch c.Engine.ImageCopyTmpDir { + case "": + return "", nil + case "storage": + return filepath.Join(c.Engine.graphRoot, "tmp"), nil + default: + if filepath.IsAbs(c.Engine.ImageCopyTmpDir) { + return c.Engine.ImageCopyTmpDir, nil + } + } + + return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) +} + +// setupEnv sets the environment variables for the engine +func (c *Config) setupEnv() error { + for _, env := range c.Engine.Env { + splitEnv := strings.SplitN(env, "=", 2) + if len(splitEnv) != 2 { + logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) + continue + } + // skip if the env is already defined + if _, ok := os.LookupEnv(splitEnv[0]); ok { + logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0]) + continue + } + if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go new file mode 100644 index 00000000000..5abb51f30cd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -0,0 +1,30 @@ +package config + +import ( + "os" +) + +// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. +func customConfigFile() (string, error) { + if path, found := os.LookupEnv("CONTAINERS_CONF"); found { + return path, nil + } + return rootlessConfigPath() +} + +func ifRootlessConfigPath() (string, error) { + return rootlessConfigPath() +} + +var defaultHelperBinariesDir = []string{ + // Homebrew install paths + "/usr/local/opt/podman/libexec", + "/opt/homebrew/bin", + "/opt/homebrew/opt/podman/libexec", + "/usr/local/bin", + // default paths + "/usr/local/libexec/podman", + "/usr/local/lib/podman", + "/usr/libexec/podman", + "/usr/lib/podman", +} diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go new file mode 100644 index 00000000000..da0ae871a81 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_linux.go @@ -0,0 +1,44 @@ +package config + +import ( + "os" + + "github.com/containers/storage/pkg/unshare" + selinux "github.com/opencontainers/selinux/go-selinux" +) + +func selinuxEnabled() bool { + return selinux.GetEnabled() +} + +func customConfigFile() (string, error) { + if path, found := os.LookupEnv("CONTAINERS_CONF"); found { + return path, nil + } + if unshare.IsRootless() { + path, err := rootlessConfigPath() + if err != nil { + return "", err + } + return path, nil + } + return OverrideContainersConfig, nil +} + +func ifRootlessConfigPath() (string, error) { + if unshare.IsRootless() { + path, err := rootlessConfigPath() + if err != nil { + return "", err + } + return path, nil + } + return "", nil +} + +var defaultHelperBinariesDir = []string{ + "/usr/local/libexec/podman", + "/usr/local/lib/podman", + "/usr/libexec/podman", + "/usr/lib/podman", +} diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go 
b/vendor/github.com/containers/common/pkg/config/config_local.go new file mode 100644 index 00000000000..21dab043f87 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -0,0 +1,115 @@ +// +build !remote + +package config + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" + + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// isDirectory tests whether the given path exists and is a directory. It +// follows symlinks. +func isDirectory(path string) error { + path, err := resolveHomeDir(path) + if err != nil { + return err + } + + info, err := os.Stat(path) + if err != nil { + return err + } + + if !info.Mode().IsDir() { + // Return a PathError to be consistent with os.Stat(). + return &os.PathError{ + Op: "stat", + Path: path, + Err: syscall.ENOTDIR, + } + } + + return nil +} + +func (c *EngineConfig) validatePaths() error { + // Relative paths can cause nasty bugs, because core paths we use could + // shift between runs or even parts of the program. - The OCI runtime + // uses a different working directory than we do, for example. + if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) { + return errors.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir) + } + if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) { + return errors.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir) + } + if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) { + return errors.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath) + } + return nil +} + +func (c *ContainersConfig) validateDevices() error { + for _, d := range c.Devices { + _, _, _, err := Device(d) + if err != nil { + return err + } + } + return nil +} + +func (c *ContainersConfig) validateUlimits() error { + for _, u := range c.DefaultUlimits { + ul, err := units.ParseUlimit(u) + if err != nil { + return errors.Wrapf(err, "unrecognized ulimit %s", u) + } + _, err = ul.GetRlimit() + if err != nil { + return err + } + } + return nil +} + +func (c *ContainersConfig) validateTZ() error { + if c.TZ == "local" || c.TZ == "" { + return nil + } + + lookupPaths := []string{ + "/usr/share/zoneinfo", + "/etc/zoneinfo", + } + + for _, paths := range lookupPaths { + zonePath := filepath.Join(paths, c.TZ) + if _, err := os.Stat(zonePath); err == nil { + // found zone information + return nil + } + } + + return errors.Errorf( + "find timezone %s in paths: %s", + c.TZ, strings.Join(lookupPaths, ", "), + ) +} + +func (c *ContainersConfig) validateUmask() error { + validUmask := regexp.MustCompile(`^[0-7]{1,4}$`) + if !validUmask.MatchString(c.Umask) { + return errors.Errorf("not a valid umask %s", c.Umask) + } + return nil +} + +func isRemote() bool { + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go new file mode 100644 index 00000000000..7fd9202bbfc --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_remote.go @@ -0,0 +1,33 @@ +// +build remote + +package config + +// isDirectory tests whether the given path exists and is a directory. It +// follows symlinks. 
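+// In the remote (client-side) build below, this and the other validators are
+// deliberate no-ops: the paths being validated live on the server, which runs
+// the non-remote build and performs the real checks.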
+func isDirectory(path string) error {
+    return nil
+}
+
+func isRemote() bool {
+    return true
+}
+
+func (c *EngineConfig) validatePaths() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateDevices() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateUlimits() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateTZ() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateUmask() error {
+    return nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
new file mode 100644
index 00000000000..6563fd31743
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package config
+
+func selinuxEnabled() bool {
+    return false
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go
new file mode 100644
index 00000000000..dbe7ba00d60
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_windows.go
@@ -0,0 +1,19 @@
+package config
+
+import "os"
+
+// podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations.
+func customConfigFile() (string, error) {
+    if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+        return path, nil
+    }
+    return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
+}
+
+func ifRootlessConfigPath() (string, error) {
+    return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
+}
+
+var defaultHelperBinariesDir = []string{
+    "C:\\Program Files\\RedHat\\Podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
new file mode 100644
index 00000000000..f497d2bbe35
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -0,0 +1,607 @@
+# The containers configuration file specifies all of the available configuration
+# command-line options/flags for container engine tools like Podman & Buildah,
+# but in a TOML format that can be easily modified and versioned.

+# Please refer to containers.conf(5) for details of all configuration options.
+# Not all container engines implement all of the options.
+# All of the options have hard-coded defaults, and options set here override
+# the built-in defaults. Users can then override these options via the command
+# line. Container engines will read containers.conf files in up to three
+# locations in the following order:
+# 1. /usr/share/containers/containers.conf
+# 2. /etc/containers/containers.conf
+# 3. $HOME/.config/containers/containers.conf (Rootless containers ONLY)
+# Items specified in the latter containers.conf, if they exist, override the
+# previous containers.conf settings, or the default settings.

+[containers]

+# List of annotations. Specified as
+# "key = value"
+# If it is empty or commented out, no annotations will be added
+#
+#annotations = []

+# Used to change the name of the default AppArmor profile of the container
+# engine.
+#
+#apparmor_profile = "container-default"

+# Default way to create a cgroup namespace for the container
+# Options are:
+# `private` Create private Cgroup Namespace for the container.
+# `host`    Share host Cgroup Namespace with the container.
+#
+#cgroupns = "private"

+# Control container cgroup configuration
+# Determines whether the container will create CGroups.
+# Options are:
+# `enabled`   Enable cgroup support within container
+# `disabled`  Disable cgroup support, will inherit cgroups from parent
+# `no-conmon` Do not create a cgroup dedicated to conmon.
+#
+#cgroups = "enabled"

+# List of default capabilities for containers. If it is empty or commented out,
+# the default capabilities defined in the container engine will be added.
+#
+default_capabilities = [
+  "CHOWN",
+  "DAC_OVERRIDE",
+  "FOWNER",
+  "FSETID",
+  "KILL",
+  "NET_BIND_SERVICE",
+  "SETFCAP",
+  "SETGID",
+  "SETPCAP",
+  "SETUID",
+  "SYS_CHROOT"
+]

+# A list of sysctls to be set in containers by default,
+# specified as "name=value",
+# for example: "net.ipv4.ping_group_range=0 0".
+#
+default_sysctls = [
+  "net.ipv4.ping_group_range=0 0",
+]

+# A list of ulimits to be set in containers by default, specified as
+# "<ulimit name>=<soft limit>:<hard limit>", for example:
+# "nofile=1024:2048"
+# See setrlimit(2) for a list of resource names.
+# Any limit not specified here will be inherited from the process launching the
+# container engine.
+# These ulimits apply to non-privileged container engines.
+#
+#default_ulimits = [
+#  "nofile=1280:2560",
+#]

+# List of devices. Specified as
+# "<device-on-host>:<device-on-container>:<permissions>", for example:
+# "/dev/sdc:/dev/xvdc:rwm".
+# If it is empty or commented out, only the default devices will be used
+#
+#devices = []

+# List of default DNS options to be added to /etc/resolv.conf inside of the container.
+#
+#dns_options = []

+# List of default DNS search domains to be added to /etc/resolv.conf inside of the container.
+#
+#dns_searches = []

+# Set default DNS servers.
+# This option can be used to override the DNS configuration passed to the
+# container. The special value "none" can be specified to disable creation of
+# /etc/resolv.conf in the container.
+# The /etc/resolv.conf file in the image will be used without changes.
+#
+#dns_servers = []

+# Environment variable list for the conmon process; used for passing necessary
+# environment variables to conmon or the runtime.
+#
+#env = [
+#  "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+#  "TERM=xterm",
+#]

+# Pass all host environment variables into the container.
+#
+#env_host = false

+# Default proxy environment variables passed into the container.
+# The environment variables passed in include:
+# http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
+# these. This option is needed when the host system uses a proxy but the
+# container should not use it. Proxy environment variables specified for the
+# container in any other way will override the values passed from the host.
+#
+#http_proxy = true

+# Run an init inside the container that forwards signals and reaps processes.
+#
+#init = false

+# Container init binary, if init=true, this is the init binary to be used for containers.
+#
+#init_path = "/usr/libexec/podman/catatonit"

+# Default way to create an IPC namespace (POSIX SysV IPC) for the container
+# Options are:
+# `private` Create private IPC Namespace for the container.
+# `host`    Share host IPC Namespace with the container.
+#
+#ipcns = "private"

+# keyring tells the container engine whether to create
+# a kernel keyring for use within the container.
+#
+#keyring = true

+# label tells the container engine whether to use container separation using
+# MAC (SELinux) labeling or not.
+# The label flag is ignored on label-disabled systems.
+#
+#label = true

+# Logging driver for the container. Available options: k8s-file and journald.
+#
+#log_driver = "k8s-file"

+# Maximum size allowed for the container log file. Negative numbers indicate
+# that no size limit is imposed. If positive, it must be >= 8192 to match or
+# exceed conmon's read buffer. The file is truncated and re-opened so the
+# limit is never exceeded.
+#
+#log_size_max = -1

+# Specifies default format tag for container log messages.
+# This is useful for creating a specific tag for container log messages.
+# Container logs default to the truncated container ID as a tag.
+#
+#log_tag = ""

+# Default way to create a Network namespace for the container
+# Options are:
+# `private` Create private Network Namespace for the container.
+# `host`    Share host Network Namespace with the container.
+# `none`    Containers do not use the network
+#
+#netns = "private"

+# Create /etc/hosts for the container. By default, the container engine
+# manages /etc/hosts, automatically adding the container's own IP address.
+#
+#no_hosts = false

+# Default way to create a PID namespace for the container
+# Options are:
+# `private` Create private PID Namespace for the container.
+# `host`    Share host PID Namespace with the container.
+#
+#pidns = "private"

+# Maximum number of processes allowed in a container.
+#
+#pids_limit = 2048

+# Copy the content from the underlying image into the newly created volume
+# when the container is created instead of when it is started. If false,
+# the container engine will not copy the content until the container is started.
+# Setting it to true may have negative performance implications.
+#
+#prepare_volume_on_create = false

+# Path to the seccomp.json profile which is used as the default seccomp profile
+# for the runtime.
+#
+#seccomp_profile = "/usr/share/containers/seccomp.json"

+# Size of /dev/shm. Specified as <number><unit>.
+# Unit is optional, values:
+# b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
+# If the unit is omitted, the system uses bytes.
+#
+#shm_size = "65536k"

+# Set timezone in container. Takes IANA timezones as well as "local",
+# which sets the timezone in the container to match the host machine.
+#
+#tz = ""

+# Set umask inside the container
+#
+#umask = "0022"

+# Default way to create a User namespace for the container
+# Options are:
+# `auto` Create unique User Namespace for the container.
+# `host` Share host User Namespace with the container.
+#
+#userns = "host"

+# Number of UIDs to allocate for the automatic container creation.
+# UIDs are allocated from the "container" UIDs listed in
+# /etc/subuid & /etc/subgid
+#
+#userns_size = 65536

+# Default way to create a UTS namespace for the container
+# Options are:
+# `private` Create private UTS Namespace for the container.
+# `host`    Share host UTS Namespace with the container.
+#
+#utsns = "private"

+# List of volumes. Specified as
+# "<directory-on-host>:<directory-in-container>:<options>", for example:
+# "/db:/var/lib/db:ro".
+# If it is empty or commented out, no volumes will be added
+#
+#volumes = []

+[secrets]
+#driver = "file"

+[secrets.opts]
+#root = "/example/directory"

+[network]

+# Network backend determines what network driver will be used to set up and tear down container networks.
+# Valid values are "cni" and "netavark".
+# The default value is empty, which means that it will automatically choose CNI or netavark. If there are
+# already containers/images or CNI networks present, it will choose CNI.
+#
+# Before changing this value all containers must be stopped, otherwise it is likely that
+# iptables rules and network interfaces might leak on the host. A reboot will fix this.
+#
+#network_backend = ""

+# Path to directory where CNI plugin binaries are located.
+#
+#cni_plugin_dirs = [
+#  "/usr/local/libexec/cni",
+#  "/usr/libexec/cni",
+#  "/usr/local/lib/cni",
+#  "/usr/lib/cni",
+#  "/opt/cni/bin",
+#]

+# The network name of the default network to attach pods to.
+#
+#default_network = "podman"

+# The default subnet for the default network given in default_network.
+# If a network with that name does not exist, a new network using that name and
+# this subnet will be created.
+# Must be a valid IPv4 CIDR prefix.
+#
+#default_subnet = "10.88.0.0/16"

+# Path to the directory where network configuration files are located.
+# For the CNI backend the default is "/etc/cni/net.d" as root
+# and "$HOME/.config/cni/net.d" as rootless.
+# For the netavark backend "/etc/containers/networks" is used as root
+# and "$graphroot/networks" as rootless.
+#
+#network_config_dir = "/etc/cni/net.d/"

+[engine]
+# Index to the active service
+#
+#active_service = "production"

+# The compression format to use when pushing an image.
+# Valid options are: `gzip`, `zstd` and `zstd:chunked`.
+#
+#compression_format = "gzip"


+# Cgroup management implementation used for the runtime.
+# Valid options are "systemd" or "cgroupfs"
+#
+#cgroup_manager = "systemd"

+# Environment variables to pass into conmon
+#
+#conmon_env_vars = [
+#  "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+#]

+# Paths to look for the conmon container manager binary
+#
+#conmon_path = [
+#  "/usr/libexec/podman/conmon",
+#  "/usr/local/libexec/podman/conmon",
+#  "/usr/local/lib/podman/conmon",
+#  "/usr/bin/conmon",
+#  "/usr/sbin/conmon",
+#  "/usr/local/bin/conmon",
+#  "/usr/local/sbin/conmon"
+#]

+# Enforces using docker.io for completing short names in Podman's compatibility
+# REST API. Note that this will ignore unqualified-search-registries and
+# short-name aliases defined in containers-registries.conf(5).
+#compat_api_enforce_docker_hub = true

+# Specify the keys sequence used to detach a container.
+# Format is a single character [a-Z] or a comma separated sequence of
+# `ctrl-<value>`, where `<value>` is one of:
+# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
+#
+#detach_keys = "ctrl-p,ctrl-q"

+# Determines whether the engine will reserve ports on the host when they are
+# forwarded to containers. When enabled, forwarded ports are held open for as
+# long as the container is running, ensuring that they cannot be reused by
+# other programs on the host. However, this can cause significant memory usage
+# if a container has many ports forwarded to it. Disabling this can save
+# memory.
+#
+#enable_port_reservation = true

+# Environment variables to be used when running the container engine (e.g., Podman, Buildah).
+# For example "http_proxy=internal.proxy.company.com".
+# Note these environment variables will not be used within the container.
+# Set the env section under the [containers] table if you want to set environment
+# variables for the container.
+#
+#env = []

+# Define where event logs will be stored, when events_logger is "file".
+#events_logfile_path=""

+# Selects which logging mechanism to use for container engine events.
+# Valid values are `journald`, `file` and `none`.
+#
+#events_logger = "journald"

+# A list of directories which are used to search for helper binaries.
+#
+#helper_binaries_dir = [
+#  "/usr/local/libexec/podman",
+#  "/usr/local/lib/podman",
+#  "/usr/libexec/podman",
+#  "/usr/lib/podman",
+#]

+# Path to OCI hooks directories for automatically executed hooks.
+#
+#hooks_dir = [
+#  "/usr/share/containers/oci/hooks.d",
+#]

+# Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, or building
+# container images. By default, images pulled and pushed match the format of the
+# source image. Building/committing defaults to OCI.
+#
+#image_default_format = ""

+# Default transport method for pulling and pushing images
+#
+#image_default_transport = "docker://"

+# Maximum number of image layers to be copied (pulled/pushed) simultaneously.
+# Not setting this field, or setting it to zero, will fall back to containers/image defaults.
+#
+#image_parallel_copies = 0

+# Default command to run the infra container
+#
+#infra_command = "/pause"

+# Infra (pause) container image name for pod infra containers. When running a
+# pod, we start a `pause` process in a container to hold open the namespaces
+# associated with the pod. This container does nothing other than sleep,
+# reserving the pod's resources for the lifetime of the pod. By default, container
+# engines run a builtin container using the pause executable. If you want to
+# override this, specify an image to pull.
+#
+#infra_image = ""

+# Specify the locking mechanism to use; valid values are "shm" and "file".
+# Change the default only if you are sure of what you are doing, in general
+# "file" is useful only on platforms where cgo is not available for using the
+# faster "shm" lock type. You may need to run "podman system renumber" after
+# you change the lock type.
+#
+#lock_type = "shm"

+# Indicates if Podman is running inside a VM via Podman Machine.
+# Podman uses this value to do extra setup around networking from the
+# container inside the VM to the host.
+#
+#machine_enabled = false

+# MultiImageArchive - if true, the container engine allows for storing archives
+# (e.g., of the docker-archive transport) with multiple images. By default,
+# Podman creates single-image archives.
+#
+#multi_image_archive = false

+# Default engine namespace
+# If engine is joined to a namespace, it will see only containers and pods
+# that were created in the same namespace, and will create new containers and
+# pods in that namespace.
+# The default namespace is "", which corresponds to no namespace. When no
+# namespace is set, all containers and pods are visible.
+#
+#namespace = ""

+# Path to the slirp4netns binary
+#
+#network_cmd_path = ""

+# Default options to pass to the slirp4netns binary.
+# For example "allow_host_loopback=true"
+#
+#network_cmd_options = ["enable_ipv6=true",]

+# Whether to use chroot instead of pivot_root in the runtime
+#
+#no_pivot_root = false

+# Number of locks available for containers and pods.
+# If this is changed, a lock renumber must be performed (e.g. with the
+# 'podman system renumber' command).
+#
+#num_locks = 2048

+# Whether to pull a new image before running a container
+#
+#pull_policy = "missing"

+# Indicates whether the application should be running in remote mode. This flag modifies the
+# --remote option on container engines. Setting the flag to true will default
+# `podman --remote=true` for access to the remote Podman service.
+# +#remote = false + +# Default OCI runtime +# +#runtime = "crun" + +# List of the OCI runtimes that support --format=json. When json is supported +# engine will use it for reporting nicer errors. +# +#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"] + +# List of the OCI runtimes that supports running containers with KVM Separation. +# +#runtime_supports_kvm = ["kata", "krun"] + +# List of the OCI runtimes that supports running containers without cgroups. +# +#runtime_supports_nocgroups = ["crun", "krun"] + +# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment +# variable. If you specify "storage", then the location of the +# container/storage tmp directory will be used. +# image_copy_tmp_dir="/var/tmp" + +# Number of seconds to wait without a connection +# before the `podman system service` times out and exits +# +#service_timeout = 5 + +# Directory for persistent engine files (database, etc) +# By default, this will be configured relative to where the containers/storage +# stores containers +# Uncomment to change location from this default +# +#static_dir = "/var/lib/containers/storage/libpod" + +# Number of seconds to wait for container to exit before sending kill signal. +# +#stop_timeout = 10 + +# map of service destinations +# +#[service_destinations] +# [service_destinations.production] +# URI to access the Podman service +# Examples: +# rootless "unix://run/user/$UID/podman/podman.sock" (Default) +# rootfull "unix://run/podman/podman.sock (Default) +# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock +# remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock +# +# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock" +# Path to file containing ssh identity key +# identity = "~/.ssh/id_rsa" + +# Directory for temporary files. Must be tmpfs (wiped after reboot) +# +#tmp_dir = "/run/libpod" + +# Directory for libpod named volumes. +# By default, this will be configured relative to where containers/storage +# stores containers. +# Uncomment to change location from this default. +# +#volume_path = "/var/lib/containers/storage/volumes" + +# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc) +[engine.runtimes] +#crun = [ +# "/usr/bin/crun", +# "/usr/sbin/crun", +# "/usr/local/bin/crun", +# "/usr/local/sbin/crun", +# "/sbin/crun", +# "/bin/crun", +# "/run/current-system/sw/bin/crun", +#] + +#kata = [ +# "/usr/bin/kata-runtime", +# "/usr/sbin/kata-runtime", +# "/usr/local/bin/kata-runtime", +# "/usr/local/sbin/kata-runtime", +# "/sbin/kata-runtime", +# "/bin/kata-runtime", +# "/usr/bin/kata-qemu", +# "/usr/bin/kata-fc", +#] + +#runc = [ +# "/usr/bin/runc", +# "/usr/sbin/runc", +# "/usr/local/bin/runc", +# "/usr/local/sbin/runc", +# "/sbin/runc", +# "/bin/runc", +# "/usr/lib/cri-o-runc/sbin/runc", +#] + +#runsc = [ +# "/usr/bin/runsc", +# "/usr/sbin/runsc", +# "/usr/local/bin/runsc", +# "/usr/local/sbin/runsc", +# "/bin/runsc", +# "/sbin/runsc", +# "/run/current-system/sw/bin/runsc", +#] + +#krun = [ +# "/usr/bin/krun", +# "/usr/local/bin/krun", +#] + +[engine.volume_plugins] +#testplugin = "/run/podman/plugins/test.sock" + +[machine] +# Number of CPU's a machine is created with. +# +#cpus=1 + +# The size of the disk in GB created when init-ing a podman-machine VM. +# +#disk_size=10 + +# The image used when creating a podman-machine VM. +# +#image = "testing" + +# Memory in MB a machine is created with. 
+# +#memory=2048 + +# The username to use and create on the podman machine OS for rootless +# container access. +# +#user = "core" + +# The [machine] table MUST be the last entry in this file. +# (Unless another table is added) +# TOML does not provide a way to end a table other than a further table being +# defined, so every key hereafter will be part of [machine] and not the +# main config. diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go new file mode 100644 index 00000000000..2791197497e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -0,0 +1,567 @@ +package config + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/containers/common/pkg/apparmor" + "github.com/containers/common/pkg/cgroupv2" + "github.com/containers/common/pkg/util" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/unshare" + "github.com/containers/storage/types" + "github.com/opencontainers/selinux/go-selinux" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // _conmonMinMajorVersion is the major version required for conmon. + _conmonMinMajorVersion = 2 + + // _conmonMinMinorVersion is the minor version required for conmon. + _conmonMinMinorVersion = 0 + + // _conmonMinPatchVersion is the sub-minor version required for conmon. + _conmonMinPatchVersion = 1 + + // _conmonVersionFormatErr is used when the expected versio-format of conmon + // has changed. + _conmonVersionFormatErr = "conmon version changed format" + + // _defaultGraphRoot points to the default path of the graph root. + _defaultGraphRoot = "/var/lib/containers/storage" + + // _defaultTransport is a prefix that we apply to an image name to check + // docker hub first for the image. + _defaultTransport = "docker://" +) + +var ( + // DefaultInitPath is the default path to the container-init binary + DefaultInitPath = "/usr/libexec/podman/catatonit" + // DefaultInfraImage to use for infra container + DefaultInfraImage = "" + // DefaultRootlessSHMLockPath is the default path for rootless SHM locks + DefaultRootlessSHMLockPath = "/libpod_rootless_lock" + // DefaultDetachKeys is the default keys sequence for detaching a + // container + DefaultDetachKeys = "ctrl-p,ctrl-q" + // ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH) + // is out of date for the current podman version + ErrConmonOutdated = errors.New("outdated conmon version") + // ErrInvalidArg indicates that an invalid argument was passed + ErrInvalidArg = errors.New("invalid argument") + // DefaultHooksDirs defines the default hooks directory + DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"} + // DefaultCapabilities for the default_capabilities option in the containers.conf file + DefaultCapabilities = []string{ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT", + } + + // It may seem a bit unconventional, but it is necessary to do so + DefaultCNIPluginDirs = []string{ + "/usr/local/libexec/cni", + "/usr/libexec/cni", + "/usr/local/lib/cni", + "/usr/lib/cni", + "/opt/cni/bin", + } +) + +const ( + // _etcDir is the sysconfdir where podman should look for system config files. 
+ // It can be overridden at build time. + _etcDir = "/etc" + // InstallPrefix is the prefix where podman will be installed. + // It can be overridden at build time. + _installPrefix = "/usr" + // CgroupfsCgroupsManager represents cgroupfs native cgroup manager + CgroupfsCgroupsManager = "cgroupfs" + // DefaultApparmorProfile specifies the default apparmor profile for the container. + DefaultApparmorProfile = apparmor.Profile + // SystemdCgroupsManager represents systemd native cgroup manager + SystemdCgroupsManager = "systemd" + // DefaultLogSizeMax is the default value for the maximum log size + // allowed for a container. Negative values mean that no limit is imposed. + DefaultLogSizeMax = -1 + // DefaultPidsLimit is the default value for maximum number of processes + // allowed inside a container + DefaultPidsLimit = 2048 + // DefaultPullPolicy pulls the image if it does not exist locally + DefaultPullPolicy = "missing" + // DefaultSignaturePolicyPath is the default value for the + // policy.json file. + DefaultSignaturePolicyPath = "/etc/containers/policy.json" + // DefaultSubnet is the subnet that will be used for the default CNI + // network. + DefaultSubnet = "10.88.0.0/16" + // DefaultRootlessSignaturePolicyPath is the location within + // XDG_CONFIG_HOME of the rootless policy.json file. + DefaultRootlessSignaturePolicyPath = "containers/policy.json" + // DefaultShmSize default value + DefaultShmSize = "65536k" + // DefaultUserNSSize default value + DefaultUserNSSize = 65536 + // OCIBufSize limits maximum LogSizeMax + OCIBufSize = 8192 + // SeccompOverridePath if this exists it overrides the default seccomp path. + SeccompOverridePath = _etcDir + "/containers/seccomp.json" + // SeccompDefaultPath defines the default seccomp path. + SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json" +) + +// DefaultConfig defines the default values from containers.conf +func DefaultConfig() (*Config, error) { + + defaultEngineConfig, err := defaultConfigFromMemory() + if err != nil { + return nil, err + } + + defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath + if unshare.IsRootless() { + configHome, err := homedir.GetConfigHome() + if err != nil { + return nil, err + } + sigPath := filepath.Join(configHome, DefaultRootlessSignaturePolicyPath) + defaultEngineConfig.SignaturePolicyPath = sigPath + if _, err := os.Stat(sigPath); err != nil { + if _, err := os.Stat(DefaultSignaturePolicyPath); err == nil { + defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath + } + } + } + + cgroupNS := "host" + if cgroup2, _ := cgroupv2.Enabled(); cgroup2 { + cgroupNS = "private" + } + + return &Config{ + Containers: ContainersConfig{ + Devices: []string{}, + Volumes: []string{}, + Annotations: []string{}, + ApparmorProfile: DefaultApparmorProfile, + CgroupNS: cgroupNS, + Cgroups: "enabled", + DefaultCapabilities: DefaultCapabilities, + DefaultSysctls: []string{}, + DefaultUlimits: getDefaultProcessLimits(), + DNSServers: []string{}, + DNSOptions: []string{}, + DNSSearches: []string{}, + EnableKeyring: true, + EnableLabeling: selinuxEnabled(), + Env: []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "TERM=xterm", + }, + EnvHost: false, + HTTPProxy: true, + Init: false, + InitPath: "", + IPCNS: "private", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + NetNS: "private", + NoHosts: false, + PidsLimit: DefaultPidsLimit, + PidNS: "private", + ShmSize: DefaultShmSize, + TZ: "", + Umask: "0022", + UTSNS: "private", 
+ UserNSSize: DefaultUserNSSize, + }, + Network: NetworkConfig{ + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + CNIPluginDirs: DefaultCNIPluginDirs, + }, + Engine: *defaultEngineConfig, + Secrets: defaultSecretConfig(), + Machine: defaultMachineConfig(), + }, nil +} + +// defaultSecretConfig returns the default secret configuration. +// Please note that the default is choosing the "file" driver. +func defaultSecretConfig() SecretConfig { + return SecretConfig{ + Driver: "file", + } +} + +// defaultMachineConfig returns the default machine configuration. +func defaultMachineConfig() MachineConfig { + return MachineConfig{ + CPUs: 1, + DiskSize: 100, + Image: getDefaultMachineImage(), + Memory: 2048, + User: getDefaultMachineUser(), + } +} + +// defaultConfigFromMemory returns a default engine configuration. Note that the +// config is different for root and rootless. It also parses the storage.conf. +func defaultConfigFromMemory() (*EngineConfig, error) { + c := new(EngineConfig) + tmp, err := defaultTmpDir() + if err != nil { + return nil, err + } + c.TmpDir = tmp + + c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + + c.CompatAPIEnforceDockerHub = true + + if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { + types.SetDefaultConfigFilePath(path) + } + storeOpts, err := types.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID()) + if err != nil { + return nil, err + } + + if storeOpts.GraphRoot == "" { + logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot) + storeOpts.GraphRoot = _defaultGraphRoot + } + c.graphRoot = storeOpts.GraphRoot + c.ImageCopyTmpDir = "/var/tmp" + c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") + c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") + + c.HelperBinariesDir = defaultHelperBinariesDir + c.HooksDir = DefaultHooksDirs + c.ImageDefaultTransport = _defaultTransport + c.StateType = BoltDBStateStore + + c.ImageBuildFormat = "oci" + + c.CgroupManager = defaultCgroupManager() + c.ServiceTimeout = uint(5) + c.StopTimeout = uint(10) + c.NetworkCmdOptions = []string{ + "enable_ipv6=true", + } + c.Remote = isRemote() + c.OCIRuntimes = map[string][]string{ + "crun": { + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", + }, + "runc": { + "/usr/bin/runc", + "/usr/sbin/runc", + "/usr/local/bin/runc", + "/usr/local/sbin/runc", + "/sbin/runc", + "/bin/runc", + "/usr/lib/cri-o-runc/sbin/runc", + "/run/current-system/sw/bin/runc", + }, + "kata": { + "/usr/bin/kata-runtime", + "/usr/sbin/kata-runtime", + "/usr/local/bin/kata-runtime", + "/usr/local/sbin/kata-runtime", + "/sbin/kata-runtime", + "/bin/kata-runtime", + "/usr/bin/kata-qemu", + "/usr/bin/kata-fc", + }, + "runsc": { + "/usr/bin/runsc", + "/usr/sbin/runsc", + "/usr/local/bin/runsc", + "/usr/local/sbin/runsc", + "/bin/runsc", + "/sbin/runsc", + "/run/current-system/sw/bin/runsc", + }, + "krun": { + "/usr/bin/krun", + "/usr/local/bin/krun", + }, + } + // Needs to be called after populating c.OCIRuntimes + c.OCIRuntime = c.findRuntime() + + c.ConmonEnvVars = []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + } + c.ConmonPath = []string{ + "/usr/libexec/podman/conmon", + "/usr/local/libexec/podman/conmon", + "/usr/local/lib/podman/conmon", + "/usr/bin/conmon", + "/usr/sbin/conmon", + "/usr/local/bin/conmon", + "/usr/local/sbin/conmon", + 
"/run/current-system/sw/bin/conmon", + } + c.PullPolicy = DefaultPullPolicy + c.RuntimeSupportsJSON = []string{ + "crun", + "runc", + "kata", + "runsc", + "krun", + } + c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} + c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"} + c.InitPath = DefaultInitPath + c.NoPivotRoot = false + + c.InfraImage = DefaultInfraImage + c.EnablePortReservation = true + c.NumLocks = 2048 + c.EventsLogger = defaultEventsLogger() + c.DetachKeys = DefaultDetachKeys + c.SDNotify = false + // TODO - ideally we should expose a `type LockType string` along with + // constants. + c.LockType = "shm" + c.MachineEnabled = false + c.ChownCopiedFiles = true + + return c, nil +} + +func defaultTmpDir() (string, error) { + if !unshare.IsRootless() { + return "/run/libpod", nil + } + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return "", err + } + libpodRuntimeDir := filepath.Join(runtimeDir, "libpod") + + if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + if !os.IsExist(err) { + return "", err + } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + // The directory already exist, just set the sticky bit + return "", errors.Wrap(err, "set sticky bit on") + } + } + return filepath.Join(libpodRuntimeDir, "tmp"), nil +} + +// probeConmon calls conmon --version and verifies it is a new enough version for +// the runtime expectations the container engine currently has. +func probeConmon(conmonBinary string) error { + cmd := exec.Command(conmonBinary, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return err + } + r := regexp.MustCompile(`^conmon version (?P\d+).(?P\d+).(?P\d+)`) + + matches := r.FindStringSubmatch(out.String()) + if len(matches) != 4 { + return errors.Wrap(err, _conmonVersionFormatErr) + } + major, err := strconv.Atoi(matches[1]) + if err != nil { + return errors.Wrap(err, _conmonVersionFormatErr) + } + if major < _conmonMinMajorVersion { + return ErrConmonOutdated + } + if major > _conmonMinMajorVersion { + return nil + } + + minor, err := strconv.Atoi(matches[2]) + if err != nil { + return errors.Wrap(err, _conmonVersionFormatErr) + } + if minor < _conmonMinMinorVersion { + return ErrConmonOutdated + } + if minor > _conmonMinMinorVersion { + return nil + } + + patch, err := strconv.Atoi(matches[3]) + if err != nil { + return errors.Wrap(err, _conmonVersionFormatErr) + } + if patch < _conmonMinPatchVersion { + return ErrConmonOutdated + } + if patch > _conmonMinPatchVersion { + return nil + } + + return nil +} + +// NetNS returns the default network namespace +func (c *Config) NetNS() string { + return c.Containers.NetNS +} + +// SecurityOptions returns the default security options +func (c *Config) SecurityOptions() []string { + securityOpts := []string{} + if c.Containers.SeccompProfile != "" && c.Containers.SeccompProfile != SeccompDefaultPath { + securityOpts = append(securityOpts, fmt.Sprintf("seccomp=%s", c.Containers.SeccompProfile)) + } + if apparmor.IsEnabled() && c.Containers.ApparmorProfile != "" { + securityOpts = append(securityOpts, fmt.Sprintf("apparmor=%s", c.Containers.ApparmorProfile)) + } + if selinux.GetEnabled() && !c.Containers.EnableLabeling { + securityOpts = append(securityOpts, fmt.Sprintf("label=%s", selinux.DisableSecOpt()[0])) + } + return securityOpts +} + +// Sysctls returns the default sysctls +func (c *Config) Sysctls() []string { + return c.Containers.DefaultSysctls +} + +// 
+// Volumes returns the default additional volumes for containers
+func (c *Config) Volumes() []string {
+	return c.Containers.Volumes
+}
+
+// Devices returns the default additional devices for containers
+func (c *Config) Devices() []string {
+	return c.Containers.Devices
+}
+
+// DNSServers returns the default DNS servers to add to resolv.conf in containers
+func (c *Config) DNSServers() []string {
+	return c.Containers.DNSServers
+}
+
+// DNSSearches returns the default DNS searches to add to resolv.conf in containers
+func (c *Config) DNSSearches() []string {
+	return c.Containers.DNSSearches
+}
+
+// DNSOptions returns the default DNS options to add to resolv.conf in containers
+func (c *Config) DNSOptions() []string {
+	return c.Containers.DNSOptions
+}
+
+// Env returns the default additional environment variables to add to containers
+func (c *Config) Env() []string {
+	return c.Containers.Env
+}
+
+// InitPath returns the default init path to add to containers
+func (c *Config) InitPath() string {
+	return c.Containers.InitPath
+}
+
+// IPCNS returns the default IPC Namespace configuration to run containers with
+func (c *Config) IPCNS() string {
+	return c.Containers.IPCNS
+}
+
+// PidNS returns the default PID Namespace configuration to run containers with
+func (c *Config) PidNS() string {
+	return c.Containers.PidNS
+}
+
+// CgroupNS returns the default Cgroup Namespace configuration to run containers with
+func (c *Config) CgroupNS() string {
+	return c.Containers.CgroupNS
+}
+
+// Cgroups returns whether containers are run with cgroup confinement
+func (c *Config) Cgroups() string {
+	return c.Containers.Cgroups
+}
+
+// UTSNS returns the default UTS Namespace configuration to run containers with
+func (c *Config) UTSNS() string {
+	return c.Containers.UTSNS
+}
+
+// ShmSize returns the default size for temporary file systems to use in containers
+func (c *Config) ShmSize() string {
+	return c.Containers.ShmSize
+}
+
+// Ulimits returns the default ulimits to use in containers
+func (c *Config) Ulimits() []string {
+	return c.Containers.DefaultUlimits
+}
+
+// PidsLimit returns the default maximum number of pids to use in containers
+func (c *Config) PidsLimit() int64 {
+	if unshare.IsRootless() {
+		if c.Engine.CgroupManager != SystemdCgroupsManager {
+			return 0
+		}
+		cgroup2, _ := cgroupv2.Enabled()
+		if !cgroup2 {
+			return 0
+		}
+	}
+
+	return c.Containers.PidsLimit
+}
+
+// DetachKeys returns the default detach keys to detach from a container
+func (c *Config) DetachKeys() string {
+	return c.Engine.DetachKeys
+}
+
+// TZ returns the default timezone to run containers in
+func (c *Config) TZ() string {
+	return c.Containers.TZ
+}
+
+// Umask returns the default umask to run containers with
+func (c *Config) Umask() string {
+	return c.Containers.Umask
+}
+
+// LogDriver returns the logging driver to be used,
+// currently either k8s-file or journald
+func (c *Config) LogDriver() string {
+	return c.Containers.LogDriver
+}
+
+// MachineEnabled returns whether podman is running inside a podman-machine VM
+func (c *Config) MachineEnabled() bool {
+	return c.Engine.MachineEnabled
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go
new file mode 100644
index 00000000000..cc2d0fe3eb9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/default_linux.go
@@ -0,0 +1,50 @@
+package config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	oldMaxSize = uint64(1048576)
+)
+
+// getDefaultMachineImage returns
the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" +} + +// getDefaultProcessLimits returns the nproc for the current process in ulimits format +// Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded +// to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501 +// In rootless containers this will fail, and the process will just use its current limits +func getDefaultProcessLimits() []string { + rlim := unix.Rlimit{Cur: oldMaxSize, Max: oldMaxSize} + oldrlim := rlim + // Attempt to set file limit and process limit to pid_max in OS + dat, err := ioutil.ReadFile("/proc/sys/kernel/pid_max") + if err == nil { + val := strings.TrimSuffix(string(dat), "\n") + max, err := strconv.ParseUint(val, 10, 64) + if err == nil { + rlim = unix.Rlimit{Cur: uint64(max), Max: uint64(max)} + } + } + defaultLimits := []string{} + if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max)) + } else if err := unix.Setrlimit(unix.RLIMIT_NPROC, &oldrlim); err == nil { + defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", oldrlim.Cur, oldrlim.Max)) + } + return defaultLimits +} diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go new file mode 100644 index 00000000000..1aa7f6ef3d9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux,!windows + +package config + +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" +} + +// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. +func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { + return false, nil +} + +// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format +func getDefaultProcessLimits() []string { + return []string{} +} diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go new file mode 100644 index 00000000000..28f102f1c50 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -0,0 +1,22 @@ +package config + +// getDefaultImage returns the default machine image stream +// On Windows this refers to the Fedora major release number +func getDefaultMachineImage() string { + return "35" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "user" +} + +// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. 
+func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
+	return false, nil
+}
+
+// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format
+func getDefaultProcessLimits() []string {
+	return []string{}
+}
diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go
new file mode 100644
index 00000000000..f64b2dfc618
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go
@@ -0,0 +1,28 @@
+// +build !systemd !cgo
+
+package config
+
+const (
+	// DefaultLogDriver is the default type of log files
+	DefaultLogDriver = "k8s-file"
+)
+
+func defaultCgroupManager() string {
+	return CgroupfsCgroupsManager
+}
+
+func defaultEventsLogger() string {
+	return "file"
+}
+
+func defaultLogDriver() string {
+	return DefaultLogDriver
+}
+
+func useSystemd() bool {
+	return false
+}
+
+func useJournald() bool {
+	return false
+}
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
new file mode 100644
index 00000000000..8c1f0ec2907
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -0,0 +1,95 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+// PullPolicy determines how and which images are being pulled from a container
+// registry (i.e., docker transport only).
+//
+// Supported string values are:
+// * "always"  <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing
+// * "newer"   <-> PullPolicyNewer
+// * "never"   <-> PullPolicyNever
+type PullPolicy int
+
+const (
+	// Always pull the image.
+	PullPolicyAlways PullPolicy = iota
+	// Pull the image only if it could not be found in the local containers
+	// storage.
+	PullPolicyMissing
+	// Never pull the image but use the one from the local containers
+	// storage.
+	PullPolicyNever
+	// Pull if the image on the registry is newer than the one in the local
+	// containers storage. An image is considered to be newer when the
+	// digests are different. Comparing the time stamps is prone to
+	// errors.
+	PullPolicyNewer
+
+	// Ideally this should be the first `iota`, but backwards compatibility
+	// prevents us from changing the values.
+	PullPolicyUnsupported = -1
+)
+
+// String converts a PullPolicy into a string.
+//
+// Supported string values are:
+// * "always"  <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing
+// * "newer"   <-> PullPolicyNewer
+// * "never"   <-> PullPolicyNever
+func (p PullPolicy) String() string {
+	switch p {
+	case PullPolicyAlways:
+		return "always"
+	case PullPolicyMissing:
+		return "missing"
+	case PullPolicyNewer:
+		return "newer"
+	case PullPolicyNever:
+		return "never"
+	}
+	return fmt.Sprintf("unrecognized policy %d", p)
+}
+
+// Validate returns an error if the pull policy is not supported.
+func (p PullPolicy) Validate() error {
+	switch p {
+	case PullPolicyAlways, PullPolicyMissing, PullPolicyNewer, PullPolicyNever:
+		return nil
+	default:
+		return errors.Errorf("unsupported pull policy %d", p)
+	}
+}
+
+// ParsePullPolicy parses the string into a pull policy.
+//
+// Supported string values are:
+// * "always"  <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing (also "ifnotpresent" and "")
+// * "newer"   <-> PullPolicyNewer (also "ifnewer")
+// * "never"   <-> PullPolicyNever
+func ParsePullPolicy(s string) (PullPolicy, error) {
+	switch s {
+	case "always", "Always":
+		return PullPolicyAlways, nil
+	case "missing", "Missing", "ifnotpresent", "IfNotPresent", "":
+		return PullPolicyMissing, nil
+	case "newer", "Newer", "ifnewer", "IfNewer":
+		return PullPolicyNewer, nil
+	case "never", "Never":
+		return PullPolicyNever, nil
+	default:
+		return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
+	}
+}
+
+// Deprecated: please use `ParsePullPolicy` instead.
+func ValidatePullPolicy(s string) (PullPolicy, error) {
+	return ParsePullPolicy(s)
+}
diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go
new file mode 100644
index 00000000000..186e8b343c9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/systemd.go
@@ -0,0 +1,87 @@
+// +build systemd,cgo

+package config
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/containers/common/pkg/cgroupv2"
+	"github.com/containers/storage/pkg/unshare"
+)
+
+var (
+	systemdOnce  sync.Once
+	usesSystemd  bool
+	journaldOnce sync.Once
+	usesJournald bool
+)
+
+const (
+	// DefaultLogDriver is the default type of log files
+	DefaultLogDriver = "journald"
+)
+
+func defaultCgroupManager() string {
+	if !useSystemd() {
+		return CgroupfsCgroupsManager
+	}
+	enabled, err := cgroupv2.Enabled()
+	if err == nil && !enabled && unshare.IsRootless() {
+		return CgroupfsCgroupsManager
+	}
+
+	return SystemdCgroupsManager
+}
+
+func defaultEventsLogger() string {
+	if useJournald() {
+		return "journald"
+	}
+	return "file"
+}
+
+func defaultLogDriver() string {
+	if useJournald() {
+		return "journald"
+	}
+	return "k8s-file"
+}
+
+func useSystemd() bool {
+	systemdOnce.Do(func() {
+		dat, err := ioutil.ReadFile("/proc/1/comm")
+		if err == nil {
+			val := strings.TrimSuffix(string(dat), "\n")
+			usesSystemd = (val == "systemd")
+		}
+	})
+	return usesSystemd
+}
+
+func useJournald() bool {
+	journaldOnce.Do(func() {
+		if !useSystemd() {
+			return
+		}
+		for _, root := range []string{"/run/log/journal", "/var/log/journal"} {
+			dirs, err := ioutil.ReadDir(root)
+			if err != nil {
+				continue
+			}
+			for _, d := range dirs {
+				if d.IsDir() {
+					if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil {
+						usesJournald = true
+						return
+					}
+				}
+			}
+		}
+	})
+	return usesJournald
+}
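// A short usage sketch for ParsePullPolicy from pull_policy.go above
// (illustrative; the main package and inputs are made up): parsing tolerates
// a few Docker-compatible spellings, and the empty string maps to "missing".
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	for _, s := range []string{"always", "IfNotPresent", "", "sometimes"} {
		policy, err := config.ParsePullPolicy(s)
		if err != nil {
			fmt.Printf("%q -> %v\n", s, err)
			continue
		}
		// String() round-trips to the canonical lowercase spelling.
		fmt.Printf("%q -> %s (valid: %v)\n", s, policy, policy.Validate() == nil)
	}
}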
diff --git a/vendor/github.com/containers/common/pkg/download/download.go b/vendor/github.com/containers/common/pkg/download/download.go
new file mode 100644
index 00000000000..abf4c87739e
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/download/download.go
@@ -0,0 +1,31 @@
+package download
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// FromURL downloads the specified source to a file in tmpdir (OS defaults if
+// empty).
+func FromURL(tmpdir, source string) (string, error) {
+	tmp, err := ioutil.TempFile(tmpdir, "")
+	if err != nil {
+		return "", fmt.Errorf("creating temporary download file: %w", err)
+	}
+	defer tmp.Close()
+
+	response, err := http.Get(source) // nolint:noctx
+	if err != nil {
+		return "", fmt.Errorf("downloading %s: %w", source, err)
+	}
+	defer response.Body.Close()
+
+	_, err = io.Copy(tmp, response.Body)
+	if err != nil {
+		return "", fmt.Errorf("copying %s to %s: %w", source, tmp.Name(), err)
+	}
+
+	return tmp.Name(), nil
+}
diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go
new file mode 100644
index 00000000000..e26e056adf9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/filters/filters.go
@@ -0,0 +1,118 @@
+package filters
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/containers/common/pkg/timetype"
+	"github.com/pkg/errors"
+)
+
+// ComputeUntilTimestamp extracts the "until" timestamp from the filter values
+func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
+	invalid := time.Time{}
+	if len(filterValues) != 1 {
+		return invalid, errors.Errorf("specify exactly one timestamp for until")
+	}
+	ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
+	if err != nil {
+		return invalid, err
+	}
+	seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+	if err != nil {
+		return invalid, err
+	}
+	return time.Unix(seconds, nanoseconds), nil
+}
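// A hedged sketch (illustrative; the main package and the timestamp value
// are made up) of resolving an "until" filter with ComputeUntilTimestamp:
// exactly one value is accepted, in any format timetype.GetTimestamp
// understands.
package main

import (
	"fmt"

	"github.com/containers/common/pkg/filters"
)

func main() {
	until, err := filters.ComputeUntilTimestamp([]string{"2022-01-01T00:00:00Z"})
	if err != nil {
		panic(err)
	}
	fmt.Println("prune objects created before:", until)
}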
+// FiltersFromRequest extracts the "filters" parameter from the specified
+// http.Request. The parameter can either be a `map[string][]string` as done
+// in new versions of Docker and libpod, or a `map[string]map[string]bool` as
+// done in older versions of Docker. We have to do a bit of Yoga to support
+// both - just as Docker does as well.
+//
+// Please refer to https://github.com/containers/podman/issues/6899 for some
+// background.
+func FiltersFromRequest(r *http.Request) ([]string, error) {
+	var (
+		compatFilters map[string]map[string]bool
+		filters       map[string][]string
+		libpodFilters []string
+		raw           []byte
+	)
+
+	if _, found := r.URL.Query()["filters"]; found {
+		raw = []byte(r.Form.Get("filters"))
+	} else if _, found := r.URL.Query()["Filters"]; found {
+		raw = []byte(r.Form.Get("Filters"))
+	} else {
+		return []string{}, nil
+	}
+
+	// Backwards compat with older versions of Docker.
+	if err := json.Unmarshal(raw, &compatFilters); err == nil {
+		for filterKey, filterMap := range compatFilters {
+			for filterValue, toAdd := range filterMap {
+				if toAdd {
+					libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue))
+				}
+			}
+		}
+		return libpodFilters, nil
+	}
+
+	if err := json.Unmarshal(raw, &filters); err != nil {
+		return nil, err
+	}
+
+	for filterKey, filterSlice := range filters {
+		f := filterKey
+		for _, filterValue := range filterSlice {
+			f += "=" + filterValue
+		}
+		libpodFilters = append(libpodFilters, f)
+	}
+
+	return libpodFilters, nil
+}
+
+// PrepareFilters prepares a map[string][]string of filters to be later searched
+// in the libpod and compat APIs to get the desired filters
+func PrepareFilters(r *http.Request) (map[string][]string, error) {
+	filtersList, err := FiltersFromRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	filterMap := map[string][]string{}
+	for _, filter := range filtersList {
+		split := strings.SplitN(filter, "=", 2)
+		if len(split) > 1 {
+			filterMap[split[0]] = append(filterMap[split[0]], split[1])
+		}
+	}
+	return filterMap, nil
+}
+
+// MatchLabelFilters returns true if all of the given label filters match the
+// given labels
+func MatchLabelFilters(filterValues []string, labels map[string]string) bool {
+outer:
+	for _, filterValue := range filterValues {
+		filterArray := strings.SplitN(filterValue, "=", 2)
+		filterKey := filterArray[0]
+		if len(filterArray) > 1 {
+			filterValue = filterArray[1]
+		} else {
+			filterValue = ""
+		}
+		for labelKey, labelValue := range labels {
+			if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
+				continue outer
+			}
+		}
+		return false
+	}
+	return true
+}
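// An illustrative pairing of the helpers above (the main package and values
// are made up): filter strings of the form "key" or "key=value", e.g. as
// produced for the "label" key by PrepareFilters, are matched against an
// object's labels.
package main

import (
	"fmt"

	"github.com/containers/common/pkg/filters"
)

func main() {
	labels := map[string]string{"app": "web", "tier": "frontend"}
	// "app=web" requires an exact value; bare "tier" matches any value.
	fmt.Println(filters.MatchLabelFilters([]string{"app=web", "tier"}, labels)) // true
	fmt.Println(filters.MatchLabelFilters([]string{"app=db"}, labels))         // false
}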
diff --git a/vendor/github.com/containers/common/pkg/manifests/errors.go b/vendor/github.com/containers/common/pkg/manifests/errors.go
new file mode 100644
index 00000000000..8398d7efcfe
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/manifests/errors.go
@@ -0,0 +1,16 @@
+package manifests
+
+import (
+	"errors"
+)
+
+var (
+	// ErrDigestNotFound is returned when we look for an image instance
+	// with a particular digest in a list or index, and fail to find it.
+	ErrDigestNotFound = errors.New("no image instance matching the specified digest was found in the list or index")
+	// ErrManifestTypeNotSupported is returned when we attempt to parse a
+	// manifest with a known MIME type as a list or index, or when we attempt
+	// to serialize a list or index to a manifest with a MIME type that we
+	// don't know how to encode.
+	ErrManifestTypeNotSupported = errors.New("manifest type not supported")
+)
diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go
new file mode 100644
index 00000000000..5c2836893a5
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go
@@ -0,0 +1,495 @@
+package manifests
+
+import (
+	"encoding/json"
+	"os"
+
+	"github.com/containers/image/v5/manifest"
+	digest "github.com/opencontainers/go-digest"
+	imgspec "github.com/opencontainers/image-spec/specs-go"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// List is a generic interface for manipulating a manifest list or an image
+// index.
+type List interface {
+	AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error
+	Remove(instanceDigest digest.Digest) error
+
+	SetURLs(instanceDigest digest.Digest, urls []string) error
+	URLs(instanceDigest digest.Digest) ([]string, error)
+
+	SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error
+	Annotations(instanceDigest *digest.Digest) (map[string]string, error)
+
+	SetOS(instanceDigest digest.Digest, os string) error
+	OS(instanceDigest digest.Digest) (string, error)
+
+	SetArchitecture(instanceDigest digest.Digest, arch string) error
+	Architecture(instanceDigest digest.Digest) (string, error)
+
+	SetOSVersion(instanceDigest digest.Digest, osVersion string) error
+	OSVersion(instanceDigest digest.Digest) (string, error)
+
+	SetVariant(instanceDigest digest.Digest, variant string) error
+	Variant(instanceDigest digest.Digest) (string, error)
+
+	SetFeatures(instanceDigest digest.Digest, features []string) error
+	Features(instanceDigest digest.Digest) ([]string, error)
+
+	SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error
+	OSFeatures(instanceDigest digest.Digest) ([]string, error)
+
+	Serialize(mimeType string) ([]byte, error)
+	Instances() []digest.Digest
+	OCIv1() *v1.Index
+	Docker() *manifest.Schema2List
+
+	findDocker(instanceDigest digest.Digest) (*manifest.Schema2ManifestDescriptor, error)
+	findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error)
+}
+
+type list struct {
+	docker manifest.Schema2List
+	oci    v1.Index
+}
+
+// Docker returns the list as a Docker schema 2 list. The returned structure should NOT be modified.
+func (l *list) Docker() *manifest.Schema2List {
+	return &l.docker
+}
+
+// OCIv1 returns the list as an OCI image index. The returned structure should NOT be modified.
+func (l *list) OCIv1() *v1.Index {
+	return &l.oci
+}
+
+// Create creates a new list.
+func Create() List {
+	return &list{
+		docker: manifest.Schema2List{
+			SchemaVersion: 2,
+			MediaType:     manifest.DockerV2ListMediaType,
+		},
+		oci: v1.Index{
+			Versioned: imgspec.Versioned{SchemaVersion: 2},
+			MediaType: v1.MediaTypeImageIndex,
+		},
+	}
+}
+
+// AddInstance adds an entry for the specified manifest digest, with assorted
+// additional information specified in parameters, to the list or index.
+func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error { + if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) { + return err + } + + schema2platform := manifest.Schema2PlatformSpec{ + Architecture: architecture, + OS: osName, + OSVersion: osVersion, + OSFeatures: osFeatures, + Variant: variant, + Features: features, + } + l.docker.Manifests = append(l.docker.Manifests, manifest.Schema2ManifestDescriptor{ + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + }, + Platform: schema2platform, + }) + + ociv1platform := v1.Platform{ + Architecture: architecture, + OS: osName, + OSVersion: osVersion, + OSFeatures: osFeatures, + Variant: variant, + } + l.oci.Manifests = append(l.oci.Manifests, v1.Descriptor{ + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + Platform: &ociv1platform, + }) + + return nil +} + +// Remove filters out any instances in the list which match the specified digest. +func (l *list) Remove(instanceDigest digest.Digest) error { + err := errors.Wrapf(os.ErrNotExist, "no instance matching digest %q found in manifest list", instanceDigest) + newDockerManifests := make([]manifest.Schema2ManifestDescriptor, 0, len(l.docker.Manifests)) + for i := range l.docker.Manifests { + if l.docker.Manifests[i].Digest != instanceDigest { + newDockerManifests = append(newDockerManifests, l.docker.Manifests[i]) + } else { + err = nil + } + } + l.docker.Manifests = newDockerManifests + newOCIv1Manifests := make([]v1.Descriptor, 0, len(l.oci.Manifests)) + for i := range l.oci.Manifests { + if l.oci.Manifests[i].Digest != instanceDigest { + newOCIv1Manifests = append(newOCIv1Manifests, l.oci.Manifests[i]) + } else { + err = nil + } + } + l.oci.Manifests = newOCIv1Manifests + return err +} + +func (l *list) findDocker(instanceDigest digest.Digest) (*manifest.Schema2ManifestDescriptor, error) { + for i := range l.docker.Manifests { + if l.docker.Manifests[i].Digest == instanceDigest { + return &l.docker.Manifests[i], nil + } + } + return nil, errors.Wrapf(ErrDigestNotFound, "no Docker manifest matching digest %q was found in list", instanceDigest.String()) +} + +func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) { + for i := range l.oci.Manifests { + if l.oci.Manifests[i].Digest == instanceDigest { + return &l.oci.Manifests[i], nil + } + } + return nil, errors.Wrapf(ErrDigestNotFound, "no OCI manifest matching digest %q was found in list", instanceDigest.String()) +} + +// SetURLs sets the URLs where the manifest might also be found. +func (l *list) SetURLs(instanceDigest digest.Digest, urls []string) error { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci.URLs = append([]string{}, urls...) + docker.URLs = append([]string{}, urls...) + return nil +} + +// URLs retrieves the locations from which this object might possibly be downloaded. +func (l *list) URLs(instanceDigest digest.Digest) ([]string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, oci.URLs...), nil +} + +// SetAnnotations sets annotations on the image index, or on a specific manifest. 
+// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error { + a := &l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + a = &oci.Annotations + } + (*a) = make(map[string]string) + for k, v := range annotations { + (*a)[k] = v + } + return nil +} + +// Annotations retrieves the annotations which have been set on the image index, or on one instance. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) Annotations(instanceDigest *digest.Digest) (map[string]string, error) { + a := l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return nil, err + } + a = oci.Annotations + } + annotations := make(map[string]string) + for k, v := range a { + annotations[k] = v + } + return annotations, nil +} + +// SetOS sets the OS field in the platform information associated with the instance with the specified digest. +func (l *list) SetOS(instanceDigest digest.Digest, os string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OS = os + oci.Platform.OS = os + return nil +} + +// OS retrieves the OS field in the platform information associated with the instance with the specified digest. +func (l *list) OS(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.OS, nil +} + +// SetArchitecture sets the Architecture field in the platform information associated with the instance with the specified digest. +func (l *list) SetArchitecture(instanceDigest digest.Digest, arch string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.Architecture = arch + oci.Platform.Architecture = arch + return nil +} + +// Architecture retrieves the Architecture field in the platform information associated with the instance with the specified digest. +func (l *list) Architecture(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.Architecture, nil +} + +// SetOSVersion sets the OSVersion field in the platform information associated with the instance with the specified digest. +func (l *list) SetOSVersion(instanceDigest digest.Digest, osVersion string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OSVersion = osVersion + oci.Platform.OSVersion = osVersion + return nil +} + +// OSVersion retrieves the OSVersion field in the platform information associated with the instance with the specified digest. +func (l *list) OSVersion(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.OSVersion, nil +} + +// SetVariant sets the Variant field in the platform information associated with the instance with the specified digest. 
+func (l *list) SetVariant(instanceDigest digest.Digest, variant string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.Variant = variant + oci.Platform.Variant = variant + return nil +} + +// Variant retrieves the Variant field in the platform information associated with the instance with the specified digest. +func (l *list) Variant(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.Variant, nil +} + +// SetFeatures sets the features list in the platform information associated with the instance with the specified digest. +// The field is specific to the Docker manifest list format, and is not present in OCI's image indexes. +func (l *list) SetFeatures(instanceDigest digest.Digest, features []string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + docker.Platform.Features = append([]string{}, features...) + // no OCI equivalent + return nil +} + +// Features retrieves the features list from the platform information associated with the instance with the specified digest. +// The field is specific to the Docker manifest list format, and is not present in OCI's image indexes. +func (l *list) Features(instanceDigest digest.Digest) ([]string, error) { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, docker.Platform.Features...), nil +} + +// SetOSFeatures sets the OS features list in the platform information associated with the instance with the specified digest. +func (l *list) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OSFeatures = append([]string{}, osFeatures...) + oci.Platform.OSFeatures = append([]string{}, osFeatures...) + return nil +} + +// OSFeatures retrieves the OS features list from the platform information associated with the instance with the specified digest. +func (l *list) OSFeatures(instanceDigest digest.Digest) ([]string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, oci.Platform.OSFeatures...), nil +} + +// FromBlob builds a list from an encoded manifest list or image index. 
+func FromBlob(manifestBytes []byte) (List, error) { + manifestType := manifest.GuessMIMEType(manifestBytes) + list := &list{ + docker: manifest.Schema2List{ + SchemaVersion: 2, + MediaType: manifest.DockerV2ListMediaType, + }, + oci: v1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, + }, + } + switch manifestType { + default: + return nil, errors.Wrapf(ErrManifestTypeNotSupported, "unable to load manifest list: unsupported format %q", manifestType) + case manifest.DockerV2ListMediaType: + if err := json.Unmarshal(manifestBytes, &list.docker); err != nil { + return nil, errors.Wrapf(err, "unable to parse Docker manifest list from image") + } + for _, m := range list.docker.Manifests { + list.oci.Manifests = append(list.oci.Manifests, v1.Descriptor{ + MediaType: m.Schema2Descriptor.MediaType, + Size: m.Schema2Descriptor.Size, + Digest: m.Schema2Descriptor.Digest, + Platform: &v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }, + }) + } + case v1.MediaTypeImageIndex: + if err := json.Unmarshal(manifestBytes, &list.oci); err != nil { + return nil, errors.Wrapf(err, "unable to parse OCIv1 manifest list") + } + for _, m := range list.oci.Manifests { + platform := m.Platform + if platform == nil { + platform = &v1.Platform{} + } + list.docker.Manifests = append(list.docker.Manifests, manifest.Schema2ManifestDescriptor{ + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: m.MediaType, + Size: m.Size, + Digest: m.Digest, + }, + Platform: manifest.Schema2PlatformSpec{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: platform.Variant, + }, + }) + } + } + return list, nil +} + +func (l *list) preferOCI() bool { + // If we have any data that's only in the OCI format, use that. + for _, m := range l.oci.Manifests { + if len(m.URLs) > 0 { + return true + } + if len(m.Annotations) > 0 { + return true + } + } + // If we have any data that's only in the Docker format, use that. + for _, m := range l.docker.Manifests { + if len(m.Platform.Features) > 0 { + return false + } + } + // If we have no manifests, remember that the Docker format is + // explicitly typed, so use that. Otherwise, default to using the OCI + // format. + return len(l.docker.Manifests) != 0 +} + +// Serialize encodes the list using the specified format, or by selecting one +// which it thinks is appropriate. 
+func (l *list) Serialize(mimeType string) ([]byte, error) {
+	var manifestBytes []byte
+	switch mimeType {
+	case "":
+		if l.preferOCI() {
+			manifest, err := json.Marshal(&l.oci)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error marshalling OCI image index")
+			}
+			manifestBytes = manifest
+		} else {
+			manifest, err := json.Marshal(&l.docker)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
+			}
+			manifestBytes = manifest
+		}
+	case v1.MediaTypeImageIndex:
+		manifest, err := json.Marshal(&l.oci)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error marshalling OCI image index")
+		}
+		manifestBytes = manifest
+	case manifest.DockerV2ListMediaType:
+		manifest, err := json.Marshal(&l.docker)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
+		}
+		manifestBytes = manifest
+	default:
+		return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType)
+	}
+	return manifestBytes, nil
+}
+
+// Instances returns the list of image instances mentioned in this list.
+func (l *list) Instances() []digest.Digest {
+	instances := make([]digest.Digest, 0, len(l.oci.Manifests))
+	for _, instance := range l.oci.Manifests {
+		instances = append(instances, instance.Digest)
+	}
+	return instances
+}
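// A hedged sketch (illustrative; the main package, digests, and sizes are
// fabricated) of assembling a two-platform manifest list with this package
// and letting Serialize("") pick the encoding via preferOCI.
package main

import (
	"fmt"

	"github.com/containers/common/pkg/manifests"
	"github.com/containers/image/v5/manifest"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	list := manifests.Create()
	for _, arch := range []string{"amd64", "arm64"} {
		d := digest.FromString("fake-" + arch + "-manifest")
		if err := list.AddInstance(d, 1234, manifest.DockerV2Schema2MediaType,
			"linux", arch, "", nil, "", nil, nil); err != nil {
			panic(err)
		}
	}
	raw, err := list.Serialize("") // pass an explicit MIME type to force a format
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d instances, %d bytes\n", len(list.Instances()), len(raw))
}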
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go
new file mode 100644
index 00000000000..5d826e80514
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/parse/parse.go
@@ -0,0 +1,182 @@
+package parse
+
+// this package contains functions that parse and validate
+// user input and are shared among container engine subcommands
+
+import (
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ValidateVolumeOpts validates a volume's options
+func ValidateVolumeOpts(options []string) ([]string, error) {
+	var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
+	finalOpts := make([]string, 0, len(options))
+	for _, opt := range options {
+		// support advanced options like upperdir=/path and workdir=/path
+		if strings.Contains(opt, "upperdir") {
+			foundUpperDir++
+			if foundUpperDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+		if strings.Contains(opt, "workdir") {
+			foundWorkDir++
+			if foundWorkDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+
+		switch opt {
+		case "noexec", "exec":
+			foundExec++
+			if foundExec > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", "))
+			}
+		case "nodev", "dev":
+			foundDev++
+			if foundDev > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", "))
+			}
+		case "nosuid", "suid":
+			foundSuid++
+			if foundSuid > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", "))
+			}
+		case "rw", "ro":
+			foundRWRO++
+			if foundRWRO > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", "))
+			}
+		case "z", "Z", "O":
+			foundLabelChange++
+			if foundLabelChange > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
+			}
+		case "U":
+			foundChown++
+			if foundChown > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'U' option", strings.Join(options, ", "))
+			}
+		case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable":
+			foundRootPropagation++
+			if foundRootPropagation > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", "))
+			}
+		case "bind", "rbind":
+			bindType++
+			if bindType > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", "))
+			}
+		case "cached", "delegated":
+			// The discarded options are OS X specific volume options
+			// introduced in a recent Docker version.
+			// They have no meaning on Linux, so here we silently
+			// drop them. This matches Docker's behavior (the options
+			// are intended to be always safe to use, even not on OS
+			// X).
+			continue
+		case "idmap":
+		default:
+			return nil, errors.Errorf("invalid option type %q", opt)
+		}
+		finalOpts = append(finalOpts, opt)
+	}
+	return finalOpts, nil
+}
+
+// Device parses a device mapping string into a source, destination, and
+// permissions string. Valid device strings look like:
+//	"/dev/sdc"
+//	"/dev/sdc:/dev/xvdc"
+//	"/dev/sdc:/dev/xvdc:rwm"
+//	"/dev/sdc:rm"
+func Device(device string) (src, dest, permissions string, err error) {
+	permissions = "rwm"
+	arr := strings.Split(device, ":")
+	switch len(arr) {
+	case 3:
+		if !isValidDeviceMode(arr[2]) {
+			return "", "", "", errors.Errorf("invalid device mode: %s", arr[2])
+		}
+		permissions = arr[2]
+		fallthrough
+	case 2:
+		if isValidDeviceMode(arr[1]) {
+			permissions = arr[1]
+		} else {
+			if arr[1] == "" || arr[1][0] != '/' {
+				return "", "", "", errors.Errorf("invalid device mode: %s", arr[1])
+			}
+			dest = arr[1]
+		}
+		fallthrough
+	case 1:
+		if len(arr[0]) > 0 {
+			src = arr[0]
+			break
+		}
+		fallthrough
+	default:
+		return "", "", "", errors.Errorf("invalid device specification: %s", device)
+	}
+
+	if dest == "" {
+		dest = src
+	}
+	return src, dest, permissions, nil
+}
+
+// isValidDeviceMode checks if the mode for a device is valid or not.
+// A valid mode is a composition of r (read), w (write), and m (mknod).
+func isValidDeviceMode(mode string) bool {
+	var legalDeviceMode = map[rune]bool{
+		'r': true,
+		'w': true,
+		'm': true,
+	}
+	if mode == "" {
+		return false
+	}
+	for _, c := range mode {
+		if !legalDeviceMode[c] {
+			return false
+		}
+		legalDeviceMode[c] = false
+	}
+	return true
+}
+
+// ValidateVolumeHostDir validates a volume mount's source directory
+func ValidateVolumeHostDir(hostDir string) error {
+	if hostDir == "" {
+		return errors.New("host directory cannot be empty")
+	}
+	if filepath.IsAbs(hostDir) {
+		if _, err := os.Stat(hostDir); err != nil {
+			return err
+		}
+	}
+	// If hostDir is not an absolute path, that means the user wants to create a
+	// named volume. This will be done later on in the code.
+	return nil
+}
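// A minimal sketch of the device-string parser above (illustrative; the
// main package is hypothetical, and the device paths need not exist, since
// Device only splits and validates the string).
package main

import (
	"fmt"

	"github.com/containers/common/pkg/parse"
)

func main() {
	for _, spec := range []string{"/dev/sdc", "/dev/sdc:/dev/xvdc:r", "/dev/sdc:bogus"} {
		src, dst, perms, err := parse.Device(spec)
		if err != nil {
			fmt.Printf("%q -> %v\n", spec, err)
			continue
		}
		fmt.Printf("%q -> src=%s dst=%s perms=%s\n", spec, src, dst, perms)
	}
}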
+// ValidateVolumeCtrDir validates a volume mount's destination directory.
+func ValidateVolumeCtrDir(ctrDir string) error {
+	if ctrDir == "" {
+		return errors.New("container directory cannot be empty")
+	}
+	if !path.IsAbs(ctrDir) {
+		return errors.Errorf("invalid container path %q, must be an absolute path", ctrDir)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go
new file mode 100644
index 00000000000..ce4446a1b00
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go
@@ -0,0 +1,50 @@
+// +build linux darwin

+package parse
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/opencontainers/runc/libcontainer/devices"
+	"github.com/pkg/errors"
+)
+
+// DeviceFromPath resolves a device string into one or more devices; if the
+// source is a directory, every device found beneath it is included.
+func DeviceFromPath(device string) ([]devices.Device, error) {
+	var devs []devices.Device
+	src, dst, permissions, err := Device(device)
+	if err != nil {
+		return nil, err
+	}
+	if unshare.IsRootless() && src != dst {
+		return nil, errors.Errorf("renaming device %s to %s is not supported in rootless containers", src, dst)
+	}
+	srcInfo, err := os.Stat(src)
+	if err != nil {
+		return nil, err
+	}
+
+	if !srcInfo.IsDir() {
+		dev, err := devices.DeviceFromPath(src, permissions)
+		if err != nil {
+			return nil, errors.Wrapf(err, "%s is not a valid device", src)
+		}
+		dev.Path = dst
+		devs = append(devs, *dev)
+		return devs, nil
+	}
+
+	// If the source device is a directory, include every device in it.
+	srcDevices, err := devices.GetDevices(src)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
+	}
+	for _, d := range srcDevices {
+		d.Path = filepath.Join(dst, filepath.Base(d.Path))
+		d.Permissions = devices.Permissions(permissions)
+		devs = append(devs, *d)
+	}
+	return devs, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
new file mode 100644
index 00000000000..a9573e4e8a8
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -0,0 +1,108 @@
+package retry
+
+import (
+	"context"
+	"io"
+	"math"
+	"net"
+	"net/url"
+	"syscall"
+	"time"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	errcodev2 "github.com/docker/distribution/registry/api/v2"
+	"github.com/hashicorp/go-multierror"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// RetryOptions defines the options for retrying an operation
+type RetryOptions struct {
+	MaxRetry int           // The number of times to possibly retry
+	Delay    time.Duration // The delay to use between retries, if set
+}
+
+// RetryIfNecessary retries the operation with exponential backoff, subject to the retryOptions
+func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
+	err := operation()
+	for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
+		delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
+		if retryOptions.Delay != 0 {
+			delay = retryOptions.Delay
+		}
+		logrus.Warnf("Failed, retrying in %s ... (%d/%d). 
Error: %v", delay, attempt+1, retryOptions.MaxRetry, err) + select { + case <-time.After(delay): + break + case <-ctx.Done(): + return err + } + err = operation() + } + return err +} + +func isRetryable(err error) bool { + err = errors.Cause(err) + + switch err { + case nil: + return false + case context.Canceled, context.DeadlineExceeded: + return false + default: // continue + } + + type unwrapper interface { + Unwrap() error + } + + switch e := err.(type) { + + case errcode.Error: + switch e.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied, + errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: + return false + } + return true + case *net.OpError: + return isRetryable(e.Err) + case *url.Error: // This includes errors returned by the net/http client. + if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF + return true + } + return isRetryable(e.Err) + case syscall.Errno: + return isErrnoRetryable(e) + case errcode.Errors: + // if this error is a group of errors, process them all in turn + for i := range e { + if !isRetryable(e[i]) { + return false + } + } + return true + case *multierror.Error: + // if this error is a group of errors, process them all in turn + for i := range e.Errors { + if !isRetryable(e.Errors[i]) { + return false + } + } + return true + case unwrapper: // Test this last, because various error types might implement .Unwrap() + err = e.Unwrap() + return isRetryable(err) + } + + return false +} + +func isErrnoRetryable(e error) bool { + switch e { + case syscall.ECONNREFUSED, syscall.EINTR, syscall.EAGAIN, syscall.EBUSY, syscall.ENETDOWN, syscall.ENETUNREACH, syscall.ENETRESET, syscall.ECONNABORTED, syscall.ECONNRESET, syscall.ETIMEDOUT, syscall.EHOSTDOWN, syscall.EHOSTUNREACH: + return true + } + return isErrnoERESTART(e) +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry_linux.go b/vendor/github.com/containers/common/pkg/retry/retry_linux.go new file mode 100644 index 00000000000..b7942f7f4e0 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry_linux.go @@ -0,0 +1,9 @@ +package retry + +import ( + "syscall" +) + +func isErrnoERESTART(e error) bool { + return e == syscall.ERESTART +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go new file mode 100644 index 00000000000..6769809755b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package retry + +func isErrnoERESTART(e error) bool { + return false +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/conversion.go b/vendor/github.com/containers/common/pkg/seccomp/conversion.go new file mode 100644 index 00000000000..4c25cb1b1cc --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/conversion.go @@ -0,0 +1,197 @@ +// NOTE: this package has originally been copied from +// github.com/opencontainers/runc and modified to work for other use cases + +package seccomp + +import ( + "fmt" + + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +var ( + goArchToSeccompArchMap = map[string]Arch{ + "386": ArchX86, + "amd64": ArchX86_64, + "amd64p32": ArchX32, + "arm": ArchARM, + "arm64": ArchAARCH64, + "mips": ArchMIPS, + "mips64": ArchMIPS64, + "mips64le": ArchMIPSEL64, + "mips64p32": ArchMIPS64N32, + "mips64p32le": ArchMIPSEL64N32, + "mipsle": ArchMIPSEL, + "ppc": 
ArchPPC, + "ppc64": ArchPPC64, + "ppc64le": ArchPPC64LE, + "s390": ArchS390, + "s390x": ArchS390X, + } + specArchToLibseccompArchMap = map[specs.Arch]string{ + specs.ArchX86: "x86", + specs.ArchX86_64: "amd64", + specs.ArchX32: "x32", + specs.ArchARM: "arm", + specs.ArchAARCH64: "arm64", + specs.ArchMIPS: "mips", + specs.ArchMIPS64: "mips64", + specs.ArchMIPS64N32: "mips64n32", + specs.ArchMIPSEL: "mipsel", + specs.ArchMIPSEL64: "mipsel64", + specs.ArchMIPSEL64N32: "mipsel64n32", + specs.ArchPPC: "ppc", + specs.ArchPPC64: "ppc64", + specs.ArchPPC64LE: "ppc64le", + specs.ArchS390: "s390", + specs.ArchS390X: "s390x", + } + specArchToSeccompArchMap = map[specs.Arch]Arch{ + specs.ArchX86: ArchX86, + specs.ArchX86_64: ArchX86_64, + specs.ArchX32: ArchX32, + specs.ArchARM: ArchARM, + specs.ArchAARCH64: ArchAARCH64, + specs.ArchMIPS: ArchMIPS, + specs.ArchMIPS64: ArchMIPS64, + specs.ArchMIPS64N32: ArchMIPS64N32, + specs.ArchMIPSEL: ArchMIPSEL, + specs.ArchMIPSEL64: ArchMIPSEL64, + specs.ArchMIPSEL64N32: ArchMIPSEL64N32, + specs.ArchPPC: ArchPPC, + specs.ArchPPC64: ArchPPC64, + specs.ArchPPC64LE: ArchPPC64LE, + specs.ArchS390: ArchS390, + specs.ArchS390X: ArchS390X, + } + specActionToSeccompActionMap = map[specs.LinuxSeccompAction]Action{ + specs.ActKill: ActKill, + // TODO: wait for this PR to get merged: + // https://github.com/opencontainers/runtime-spec/pull/1064 + // specs.ActKillProcess ActKillProcess, + // specs.ActKillThread ActKillThread, + specs.ActErrno: ActErrno, + specs.ActTrap: ActTrap, + specs.ActAllow: ActAllow, + specs.ActTrace: ActTrace, + specs.ActLog: ActLog, + } + specOperatorToSeccompOperatorMap = map[specs.LinuxSeccompOperator]Operator{ + specs.OpNotEqual: OpNotEqual, + specs.OpLessThan: OpLessThan, + specs.OpLessEqual: OpLessEqual, + specs.OpEqualTo: OpEqualTo, + specs.OpGreaterEqual: OpGreaterEqual, + specs.OpGreaterThan: OpGreaterThan, + specs.OpMaskedEqual: OpMaskedEqual, + } +) + +// GoArchToSeccompArch converts a runtime.GOARCH to a seccomp `Arch`. The +// function returns an error if the architecture conversion is not supported. +func GoArchToSeccompArch(goArch string) (Arch, error) { + arch, ok := goArchToSeccompArchMap[goArch] + if !ok { + return "", fmt.Errorf("unsupported go arch provided: %s", goArch) + } + return arch, nil +} + +// specToSeccomp converts a `LinuxSeccomp` spec into a `Seccomp` struct. 
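// A short sketch (illustrative; the main package is hypothetical) of mapping
// the running binary's architecture onto its seccomp equivalent with
// GoArchToSeccompArch; GOARCH values absent from the map above (e.g.
// riscv64) yield an error.
package main

import (
	"fmt"
	"runtime"

	"github.com/containers/common/pkg/seccomp"
)

func main() {
	arch, err := seccomp.GoArchToSeccompArch(runtime.GOARCH)
	if err != nil {
		panic(err)
	}
	fmt.Println(arch)
}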
+func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) {
+	res := &Seccomp{
+		Syscalls: []*Syscall{},
+	}
+
+	for _, arch := range spec.Architectures {
+		newArch, err := specArchToSeccompArch(arch)
+		if err != nil {
+			return nil, errors.Wrap(err, "convert spec arch")
+		}
+		res.Architectures = append(res.Architectures, newArch)
+	}
+
+	// Convert the default action.
+	newDefaultAction, err := specActionToSeccompAction(spec.DefaultAction)
+	if err != nil {
+		return nil, errors.Wrap(err, "convert default action")
+	}
+	res.DefaultAction = newDefaultAction
+	res.DefaultErrnoRet = spec.DefaultErrnoRet
+
+	// Loop through all syscall blocks and convert them to the internal format.
+	for _, call := range spec.Syscalls {
+		newAction, err := specActionToSeccompAction(call.Action)
+		if err != nil {
+			return nil, errors.Wrap(err, "convert action")
+		}
+
+		for _, name := range call.Names {
+			newCall := Syscall{
+				Name:     name,
+				Action:   newAction,
+				ErrnoRet: call.ErrnoRet,
+				Args:     []*Arg{},
+			}
+
+			// Loop through all the arguments of the syscall and convert them.
+			for _, arg := range call.Args {
+				newOp, err := specOperatorToSeccompOperator(arg.Op)
+				if err != nil {
+					return nil, errors.Wrap(err, "convert operator")
+				}
+
+				newArg := Arg{
+					Index:    arg.Index,
+					Value:    arg.Value,
+					ValueTwo: arg.ValueTwo,
+					Op:       newOp,
+				}
+
+				newCall.Args = append(newCall.Args, &newArg)
+			}
+			res.Syscalls = append(res.Syscalls, &newCall)
+		}
+	}
+
+	return res, nil
+}
+
+// specArchToLibseccompArch converts a spec arch into a libseccomp one.
+func specArchToLibseccompArch(arch specs.Arch) (string, error) {
+	if res, ok := specArchToLibseccompArchMap[arch]; ok {
+		return res, nil
+	}
+	return "", errors.Errorf(
+		"architecture %q is not valid for libseccomp", arch,
+	)
+}
+
+// specArchToSeccompArch converts a spec arch into an internal one.
+func specArchToSeccompArch(arch specs.Arch) (Arch, error) {
+	if res, ok := specArchToSeccompArchMap[arch]; ok {
+		return res, nil
+	}
+	return "", errors.Errorf("architecture %q is not valid", arch)
+}
+
+// specActionToSeccompAction converts a spec action into a seccomp one.
+func specActionToSeccompAction(action specs.LinuxSeccompAction) (Action, error) {
+	if res, ok := specActionToSeccompActionMap[action]; ok {
+		return res, nil
+	}
+	return "", errors.Errorf(
+		"spec action %q is not a valid internal action", action,
+	)
+}
+
+// specOperatorToSeccompOperator converts a spec operator into a seccomp one.
+func specOperatorToSeccompOperator(operator specs.LinuxSeccompOperator) (Operator, error) {
+	if op, ok := specOperatorToSeccompOperatorMap[operator]; ok {
+		return op, nil
+	}
+	return "", errors.Errorf(
+		"spec operator %q is not a valid internal operator", operator,
+	)
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
new file mode 100644
index 00000000000..d196384f033
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+ +package seccomp + +import ( + "golang.org/x/sys/unix" +) + +func arches() []Architecture { + return []Architecture{ + { + Arch: ArchX86_64, + SubArches: []Arch{ArchX86, ArchX32}, + }, + { + Arch: ArchAARCH64, + SubArches: []Arch{ArchARM}, + }, + { + Arch: ArchMIPS64, + SubArches: []Arch{ArchMIPS, ArchMIPS64N32}, + }, + { + Arch: ArchMIPS64N32, + SubArches: []Arch{ArchMIPS, ArchMIPS64}, + }, + { + Arch: ArchMIPSEL64, + SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64N32}, + }, + { + Arch: ArchMIPSEL64N32, + SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64}, + }, + { + Arch: ArchS390X, + SubArches: []Arch{ArchS390}, + }, + } +} + +// DefaultProfile defines the allowlist for the default seccomp profile. +func DefaultProfile() *Seccomp { + einval := uint(unix.EINVAL) + enosys := uint(unix.ENOSYS) + eperm := uint(unix.EPERM) + + syscalls := []*Syscall{ + { + Names: []string{ + "bdflush", + "io_pgetevents", + "kexec_file_load", + "kexec_load", + "migrate_pages", + "move_pages", + "nfsservctl", + "nice", + "oldfstat", + "oldlstat", + "oldolduname", + "oldstat", + "olduname", + "pciconfig_iobase", + "pciconfig_read", + "pciconfig_write", + "sgetmask", + "ssetmask", + "swapcontext", + "swapoff", + "swapon", + "sysfs", + "uselib", + "userfaultfd", + "ustat", + "vm86", + "vm86old", + "vmsplice", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + }, + { + Names: []string{ + "_llseek", + "_newselect", + "accept", + "accept4", + "access", + "adjtimex", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_adjtime", + "clock_adjtime64", + "clock_getres", + "clock_getres_time64", + "clock_gettime", + "clock_gettime64", + "clock_nanosleep", + "clock_nanosleep_time64", + "clone", + "clone3", + "close", + "close_range", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_pwait2", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "faccessat2", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsconfig", + "fsetxattr", + "fsmount", + "fsopen", + "fspick", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futex_time64", + "futimesat", + "get_robust_list", + "get_thread_area", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "get_mempolicy", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "io_destroy", + "io_getevents", + "io_setup", + "io_submit", + "ioctl", + "ioprio_get", + "ioprio_set", + "ipc", + "keyctl", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "lremovexattr", + "lseek", + "lsetxattr", + 
"lstat", + "lstat64", + "madvise", + "mbind", + "memfd_create", + "memfd_secret", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mount", + "move_mount", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedreceive_time64", + "mq_timedsend", + "mq_timedsend_time64", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "name_to_handle_at", + "nanosleep", + "newfstatat", + "open", + "openat", + "openat2", + "open_tree", + "pause", + "pidfd_getfd", + "pidfd_open", + "pidfd_send_signal", + "pipe", + "pipe2", + "pivot_root", + "pkey_alloc", + "pkey_free", + "pkey_mprotect", + "poll", + "ppoll", + "ppoll_time64", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "pselect6", + "pselect6_time64", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readdir", + "readlink", + "readlinkat", + "readv", + "reboot", + "recv", + "recvfrom", + "recvmmsg", + "recvmmsg_time64", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rseq", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_sigtimedwait_time64", + "rt_tgsigqueueinfo", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_rr_get_interval_time64", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "semtimedop_time64", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setns", + "set_mempolicy", + "set_robust_list", + "set_thread_area", + "set_tid_address", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "setsid", + "setsockopt", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "statx", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timer_getoverrun", + "timer_gettime", + "timer_gettime64", + "timer_settime", + "timer_settime64", + "timerfd_create", + "timerfd_gettime", + "timerfd_gettime64", + "timerfd_settime", + "timerfd_settime64", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "umount", + "umount2", + "uname", + "unlink", + "unlinkat", + "unshare", + "utime", + "utimensat", + "utimensat_time64", + "utimes", + "vfork", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: ActAllow, + Args: []*Arg{}, + }, + { + Names: []string{"personality"}, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: 0x0, + Op: OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: 0x0008, + Op: OpEqualTo, + 
}, + }, + }, + { + Names: []string{"personality"}, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: 0x20000, + Op: OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: 0x20008, + Op: OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: OpEqualTo, + }, + }, + }, + { + Names: []string{ + "sync_file_range2", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Arches: []string{"ppc64le"}, + }, + }, + { + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Arches: []string{"arm", "arm64"}, + }, + }, + { + Names: []string{ + "arch_prctl", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Arches: []string{"amd64", "x32"}, + }, + }, + { + Names: []string{ + "modify_ldt", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Arches: []string{"amd64", "x32", "x86"}, + }, + }, + { + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "bpf", + "fanotify_init", + "lookup_dcookie", + "perf_event_open", + "quotactl", + "setdomainname", + "sethostname", + "setns", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "bpf", + "fanotify_init", + "lookup_dcookie", + "perf_event_open", + "quotactl", + "setdomainname", + "sethostname", + "setns", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_madvise", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: 
[]string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_madvise", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "clock_settime", + "clock_settime64", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "clock_settime", + "clock_settime64", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: ActAllow, + Args: []*Arg{}, + Includes: Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: ActErrno, + Errno: "EPERM", + ErrnoRet: &eperm, + Args: []*Arg{}, + Excludes: Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActErrno, + Errno: "EINVAL", + ErrnoRet: &einval, + Args: []*Arg{ + { + Index: 0, + Value: unix.AF_NETLINK, + Op: OpEqualTo, + }, + { + Index: 2, + Value: unix.NETLINK_AUDIT, + Op: OpEqualTo, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Args: []*Arg{ + { + Index: 2, + Value: unix.NETLINK_AUDIT, + Op: OpNotEqual, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: unix.AF_NETLINK, + Op: OpNotEqual, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Args: []*Arg{ + { + Index: 2, + Value: unix.NETLINK_AUDIT, + Op: OpNotEqual, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Includes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + } + + return &Seccomp{ + DefaultAction: ActErrno, + DefaultErrno: "ENOSYS", + DefaultErrnoRet: &enosys, + ArchMap: arches(), + Syscalls: syscalls, + } +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go new file mode 100644 index 00000000000..a1009012df3 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -0,0 +1,93 @@ +// +build linux,seccomp + +package seccomp + +import ( + "golang.org/x/sys/unix" +) + +// Error table +var errnoArch = map[string]uint{ + "EPERM": uint(unix.EPERM), + "ENOENT": uint(unix.ENOENT), + "ESRCH": uint(unix.ESRCH), + "EIO": uint(unix.EIO), + "ENXIO": uint(unix.ENXIO), + "E2BIG": uint(unix.E2BIG), + "ENOEXEC": uint(unix.ENOEXEC), + "EBADF": uint(unix.EBADF), + "ECHILD": uint(unix.ECHILD), + "EDEADLK": uint(unix.EDEADLK), + "ENOMEM": uint(unix.ENOMEM), + "EACCES": uint(unix.EACCES), + "EFAULT": uint(unix.EFAULT), + "ENOTBLK": uint(unix.ENOTBLK), + "EBUSY": uint(unix.EBUSY), + 
"EEXIST": uint(unix.EEXIST), + "EXDEV": uint(unix.EXDEV), + "ENODEV": uint(unix.ENODEV), + "ENOTDIR": uint(unix.ENOTDIR), + "EISDIR": uint(unix.EISDIR), + "EINVAL": uint(unix.EINVAL), + "ENFILE": uint(unix.ENFILE), + "EMFILE": uint(unix.EMFILE), + "ENOTTY": uint(unix.ENOTTY), + "ETXTBSY": uint(unix.ETXTBSY), + "EFBIG": uint(unix.EFBIG), + "ENOSPC": uint(unix.ENOSPC), + "ESPIPE": uint(unix.ESPIPE), + "EROFS": uint(unix.EROFS), + "EMLINK": uint(unix.EMLINK), + "EPIPE": uint(unix.EPIPE), + "EDOM": uint(unix.EDOM), + "ERANGE": uint(unix.ERANGE), + "EAGAIN": uint(unix.EAGAIN), + "EINPROGRESS": uint(unix.EINPROGRESS), + "EALREADY": uint(unix.EALREADY), + "ENOTSOCK": uint(unix.ENOTSOCK), + "EDESTADDRREQ": uint(unix.EDESTADDRREQ), + "EMSGSIZE": uint(unix.EMSGSIZE), + "EPROTOTYPE": uint(unix.EPROTOTYPE), + "ENOPROTOOPT": uint(unix.ENOPROTOOPT), + "EPROTONOSUPPORT": uint(unix.EPROTONOSUPPORT), + "ESOCKTNOSUPPORT": uint(unix.ESOCKTNOSUPPORT), + "EOPNOTSUPP": uint(unix.EOPNOTSUPP), + "EPFNOSUPPORT": uint(unix.EPFNOSUPPORT), + "EAFNOSUPPORT": uint(unix.EAFNOSUPPORT), + "EADDRINUSE": uint(unix.EADDRINUSE), + "EADDRNOTAVAIL": uint(unix.EADDRNOTAVAIL), + "ENETDOWN": uint(unix.ENETDOWN), + "ENETUNREACH": uint(unix.ENETUNREACH), + "ENETRESET": uint(unix.ENETRESET), + "ECONNABORTED": uint(unix.ECONNABORTED), + "ECONNRESET": uint(unix.ECONNRESET), + "ENOBUFS": uint(unix.ENOBUFS), + "EISCONN": uint(unix.EISCONN), + "ENOTCONN": uint(unix.ENOTCONN), + "ESHUTDOWN": uint(unix.ESHUTDOWN), + "ETOOMANYREFS": uint(unix.ETOOMANYREFS), + "ETIMEDOUT": uint(unix.ETIMEDOUT), + "ECONNREFUSED": uint(unix.ECONNREFUSED), + "ELOOP": uint(unix.ELOOP), + "ENAMETOOLONG": uint(unix.ENAMETOOLONG), + "EHOSTDOWN": uint(unix.EHOSTDOWN), + "EHOSTUNREACH": uint(unix.EHOSTUNREACH), + "ENOTEMPTY": uint(unix.ENOTEMPTY), + "EUSERS": uint(unix.EUSERS), + "EDQUOT": uint(unix.EDQUOT), + "ESTALE": uint(unix.ESTALE), + "EREMOTE": uint(unix.EREMOTE), + "ENOLCK": uint(unix.ENOLCK), + "ENOSYS": uint(unix.ENOSYS), + "EILSEQ": uint(unix.EILSEQ), + "ENOMEDIUM": uint(unix.ENOMEDIUM), + "EMEDIUMTYPE": uint(unix.EMEDIUMTYPE), + "EOVERFLOW": uint(unix.EOVERFLOW), + "ECANCELED": uint(unix.ECANCELED), + "EIDRM": uint(unix.EIDRM), + "ENOMSG": uint(unix.ENOMSG), + "ENOTSUP": uint(unix.ENOTSUP), + "EBADMSG": uint(unix.EBADMSG), + "ENOTRECOVERABLE": uint(unix.ENOTRECOVERABLE), + "EOWNERDEAD": uint(unix.EOWNERDEAD), +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go new file mode 100644 index 00000000000..90da99f0a45 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go @@ -0,0 +1,237 @@ +// +build seccomp + +// NOTE: this package has originally been copied from +// github.com/opencontainers/runc and modified to work for other use cases + +package seccomp + +import ( + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + libseccomp "github.com/seccomp/libseccomp-golang" + "golang.org/x/sys/unix" +) + +// NOTE: this package has originally been copied from +// github.com/opencontainers/runc and modified to work for other use cases + +var ( + // ErrSpecNil is a possible return error from BuildFilter() and occurs if + // the provided spec is nil. + ErrSpecNil = errors.New("spec is nil") + + // ErrSpecEmpty is a possible return error from BuildFilter() and occurs if + // the provided spec has neither a DefaultAction nor any syscalls. 
+ ErrSpecEmpty = errors.New("spec contains neither a default action nor any syscalls") +) + +// BuildFilter does a basic validation for the provided seccomp profile +// string and returns a filter for it. +func BuildFilter(spec *specs.LinuxSeccomp) (*libseccomp.ScmpFilter, error) { + // Sanity checking to allow consumers to act accordingly + if spec == nil { + return nil, ErrSpecNil + } + if spec.DefaultAction == "" && len(spec.Syscalls) == 0 { + return nil, ErrSpecEmpty + } + + profile, err := specToSeccomp(spec) + if err != nil { + return nil, errors.Wrap(err, "convert spec to seccomp profile") + } + + defaultAction, err := toAction(profile.DefaultAction, profile.DefaultErrnoRet) + if err != nil { + return nil, errors.Wrapf(err, "convert default action %s", profile.DefaultAction) + } + + filter, err := libseccomp.NewFilter(defaultAction) + if err != nil { + return nil, errors.Wrapf(err, "create filter for default action %s", defaultAction) + } + + // Add extra architectures + for _, arch := range spec.Architectures { + libseccompArch, err := specArchToLibseccompArch(arch) + if err != nil { + return nil, errors.Wrap(err, "convert spec arch") + } + + scmpArch, err := libseccomp.GetArchFromString(libseccompArch) + if err != nil { + return nil, errors.Wrapf(err, "validate Seccomp architecture %s", arch) + } + + if err := filter.AddArch(scmpArch); err != nil { + return nil, errors.Wrap(err, "add architecture to seccomp filter") + } + } + + // Unset no new privs bit + if err := filter.SetNoNewPrivsBit(false); err != nil { + return nil, errors.Wrap(err, "set no new privileges flag") + } + + // Add a rule for each syscall + for _, call := range profile.Syscalls { + if call == nil { + return nil, errors.New("encountered nil syscall while initializing seccomp") + } + + if err = matchSyscall(filter, call); err != nil { + return nil, errors.Wrap(err, "filter matches syscall") + } + } + + return filter, nil +} + +func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error { + if call == nil || filter == nil { + return errors.New("cannot use nil as syscall to block") + } + + if call.Name == "" { + return errors.New("empty string is not a valid syscall") + } + + // If we can't resolve the syscall, assume it's not supported on this kernel + // Ignore it, don't error out + callNum, err := libseccomp.GetSyscallFromName(call.Name) + if err != nil { + return nil + } + + // Convert the call's action to the libseccomp equivalent + callAct, err := toAction(call.Action, call.ErrnoRet) + if err != nil { + return errors.Wrapf(err, "convert action %s", call.Action) + } + + // Unconditional match - just add the rule + if len(call.Args) == 0 { + if err = filter.AddRule(callNum, callAct); err != nil { + return errors.Wrapf(err, "add seccomp filter rule for syscall %s", call.Name) + } + } else { + // Linux system calls can have at most 6 arguments + const syscallMaxArguments int = 6 + + // If two or more arguments have the same condition, + // Revert to old behavior, adding each condition as a separate rule + argCounts := make([]uint, syscallMaxArguments) + conditions := []libseccomp.ScmpCondition{} + + for _, cond := range call.Args { + newCond, err := toCondition(cond) + if err != nil { + return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name) + } + + argCounts[cond.Index] += 1 + + conditions = append(conditions, newCond) + } + + hasMultipleArgs := false + for _, count := range argCounts { + if count > 1 { + hasMultipleArgs = true + break + } + } + + if hasMultipleArgs { + // 
Revert to old behavior
+			// Add each condition attached to a separate rule
+			for _, cond := range conditions {
+				condArr := []libseccomp.ScmpCondition{cond}
+
+				if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil {
+					return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name)
+				}
+			}
+		} else if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil {
+			// No conditions share the same argument
+			// Use new, proper behavior
+			return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name)
+		}
+	}
+
+	return nil
+}
+
+// toAction converts an internal `Action` type to a `libseccomp.ScmpAction`
+// type.
+func toAction(act Action, errnoRet *uint) (libseccomp.ScmpAction, error) {
+	switch act {
+	case ActKill:
+		return libseccomp.ActKill, nil
+	case ActKillProcess:
+		return libseccomp.ActKillProcess, nil
+	case ActErrno:
+		if errnoRet != nil {
+			return libseccomp.ActErrno.SetReturnCode(int16(*errnoRet)), nil
+		}
+		return libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM)), nil
+	case ActTrap:
+		return libseccomp.ActTrap, nil
+	case ActAllow:
+		return libseccomp.ActAllow, nil
+	case ActTrace:
+		if errnoRet != nil {
+			return libseccomp.ActTrace.SetReturnCode(int16(*errnoRet)), nil
+		}
+		return libseccomp.ActTrace.SetReturnCode(int16(unix.EPERM)), nil
+	case ActLog:
+		return libseccomp.ActLog, nil
+	default:
+		return libseccomp.ActInvalid, errors.Errorf("invalid action %s", act)
+	}
+}
+
+// toCondition converts an internal `Arg` type to a `libseccomp.ScmpCondition`
+// type.
+func toCondition(arg *Arg) (cond libseccomp.ScmpCondition, err error) {
+	if arg == nil {
+		return cond, errors.New("cannot convert nil to syscall condition")
+	}
+
+	op, err := toCompareOp(arg.Op)
+	if err != nil {
+		return cond, errors.Wrap(err, "convert compare operator")
+	}
+
+	condition, err := libseccomp.MakeCondition(
+		arg.Index, op, arg.Value, arg.ValueTwo,
+	)
+	if err != nil {
+		return cond, errors.Wrap(err, "make condition")
+	}
+
+	return condition, nil
+}
+
+// toCompareOp converts an internal `Operator` type to a
+// `libseccomp.ScmpCompareOp`.
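+// For example, OpEqualTo maps to libseccomp.CompareEqual and OpMaskedEqual to
+// libseccomp.CompareMaskedEqual; an unrecognized operator yields
+// libseccomp.CompareInvalid together with an error.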
+func toCompareOp(op Operator) (libseccomp.ScmpCompareOp, error) { + switch op { + case OpEqualTo: + return libseccomp.CompareEqual, nil + case OpNotEqual: + return libseccomp.CompareNotEqual, nil + case OpGreaterThan: + return libseccomp.CompareGreater, nil + case OpGreaterEqual: + return libseccomp.CompareGreaterEqual, nil + case OpLessThan: + return libseccomp.CompareLess, nil + case OpLessEqual: + return libseccomp.CompareLessOrEqual, nil + case OpMaskedEqual: + return libseccomp.CompareMaskedEqual, nil + default: + return libseccomp.CompareInvalid, errors.Errorf("invalid operator %s", op) + } +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json new file mode 100644 index 00000000000..9314eb3cc5e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -0,0 +1,1041 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "defaultErrnoRet": 38, + "defaultErrno": "ENOSYS", + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + }, + { + "architecture": "SCMP_ARCH_AARCH64", + "subArchitectures": [ + "SCMP_ARCH_ARM" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64" + ] + }, + { + "architecture": "SCMP_ARCH_S390X", + "subArchitectures": [ + "SCMP_ARCH_S390" + ] + } + ], + "syscalls": [ + { + "names": [ + "bdflush", + "io_pgetevents", + "kexec_file_load", + "kexec_load", + "migrate_pages", + "move_pages", + "nfsservctl", + "nice", + "oldfstat", + "oldlstat", + "oldolduname", + "oldstat", + "olduname", + "pciconfig_iobase", + "pciconfig_read", + "pciconfig_write", + "sgetmask", + "ssetmask", + "swapcontext", + "swapoff", + "swapon", + "sysfs", + "uselib", + "userfaultfd", + "ustat", + "vm86", + "vm86old", + "vmsplice" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": {}, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "_llseek", + "_newselect", + "accept", + "accept4", + "access", + "adjtimex", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_adjtime", + "clock_adjtime64", + "clock_getres", + "clock_getres_time64", + "clock_gettime", + "clock_gettime64", + "clock_nanosleep", + "clock_nanosleep_time64", + "clone", + "clone3", + "close", + "close_range", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_pwait2", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "faccessat2", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsconfig", + "fsetxattr", + "fsmount", + "fsopen", + "fspick", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + 
"ftruncate", + "ftruncate64", + "futex", + "futex_time64", + "futimesat", + "get_robust_list", + "get_thread_area", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "get_mempolicy", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "io_destroy", + "io_getevents", + "io_setup", + "io_submit", + "ioctl", + "ioprio_get", + "ioprio_set", + "ipc", + "keyctl", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "mbind", + "memfd_create", + "memfd_secret", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mount", + "move_mount", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedreceive_time64", + "mq_timedsend", + "mq_timedsend_time64", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "name_to_handle_at", + "nanosleep", + "newfstatat", + "open", + "openat", + "openat2", + "open_tree", + "pause", + "pidfd_getfd", + "pidfd_open", + "pidfd_send_signal", + "pipe", + "pipe2", + "pivot_root", + "pkey_alloc", + "pkey_free", + "pkey_mprotect", + "poll", + "ppoll", + "ppoll_time64", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "pselect6", + "pselect6_time64", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readdir", + "readlink", + "readlinkat", + "readv", + "reboot", + "recv", + "recvfrom", + "recvmmsg", + "recvmmsg_time64", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rseq", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_sigtimedwait_time64", + "rt_tgsigqueueinfo", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_rr_get_interval_time64", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "semtimedop_time64", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setns", + "set_mempolicy", + "set_robust_list", + "set_thread_area", + "set_tid_address", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "setsid", + "setsockopt", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socketcall", + "socketpair", + "splice", + "stat", + 
"stat64", + "statfs", + "statfs64", + "statx", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timer_getoverrun", + "timer_gettime", + "timer_gettime64", + "timer_settime", + "timer_settime64", + "timerfd_create", + "timerfd_gettime", + "timerfd_gettime64", + "timerfd_settime", + "timerfd_settime64", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "umount", + "umount2", + "uname", + "unlink", + "unlinkat", + "unshare", + "utime", + "utimensat", + "utimensat_time64", + "utimes", + "vfork", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "sync_file_range2" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "ppc64le" + ] + }, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "arm", + "arm64" + ] + }, + "excludes": {} + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32" + ] + }, + "excludes": {} + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32", + "x86" + ] + }, + "excludes": {} + }, + { + "names": [ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "bpf", + "fanotify_init", + "lookup_dcookie", + "perf_event_open", + "quotactl", + "setdomainname", + "sethostname", + "setns" + ], + "action": 
"SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "excludes": {} + }, + { + "names": [ + "bpf", + "fanotify_init", + "lookup_dcookie", + "perf_event_open", + "quotactl", + "setdomainname", + "sethostname", + "setns" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "excludes": {} + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "excludes": {} + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "kcmp", + "process_madvise", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "excludes": {} + }, + { + "names": [ + "kcmp", + "process_madvise", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "excludes": {} + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "settimeofday", + "stime", + "clock_settime", + "clock_settime64" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "excludes": {} + }, + { + "names": [ + "settimeofday", + "stime", + "clock_settime", + "clock_settime64" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "excludes": {} + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ERRNO", + "args": [], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "errnoRet": 1, + "errno": "EPERM" + }, + { + "names": [ + "socket" + 
], + "action": "SCMP_ACT_ERRNO", + "args": [ + { + "index": 0, + "value": 16, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 2, + "value": 9, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + }, + "errnoRet": 22, + "errno": "EINVAL" + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 2, + "value": 9, + "valueTwo": 0, + "op": "SCMP_CMP_NE" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + } + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 16, + "valueTwo": 0, + "op": "SCMP_CMP_NE" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + } + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 2, + "value": 9, + "valueTwo": 0, + "op": "SCMP_CMP_NE" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + } + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": null, + "comment": "", + "includes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + }, + "excludes": {} + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go new file mode 100644 index 00000000000..0c022ac7ab9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -0,0 +1,218 @@ +// +build seccomp + +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2013-2018 Docker, Inc. + +package seccomp + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + "github.com/opencontainers/runtime-spec/specs-go" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { + return setupSeccomp(DefaultProfile(), rs) +} + +// LoadProfile takes a json string and decodes the seccomp profile. +func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + var config Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("decoding seccomp profile failed: %v", err) + } + return setupSeccomp(&config, rs) +} + +// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile. +func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + config := &Seccomp{} + if err := json.Unmarshal(body, config); err != nil { + return nil, fmt.Errorf("decoding seccomp profile failed: %v", err) + } + return setupSeccomp(config, rs) +} + +// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp +func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) { + return setupSeccomp(config, specgen) +} + +var nativeToSeccomp = map[string]Arch{ + "amd64": ArchX86_64, + "arm64": ArchAARCH64, + "mips64": ArchMIPS64, + "mips64n32": ArchMIPS64N32, + "mipsel64": ArchMIPSEL64, + "mipsel64n32": ArchMIPSEL64N32, + "s390x": ArchS390X, +} + +// inSlice tests whether a string is contained in a slice of strings or not. 
+// Comparison is case sensitive +func inSlice(slice []string, s string) bool { + for _, ss := range slice { + if s == ss { + return true + } + } + return false +} + +func getArchitectures(config *Seccomp, newConfig *specs.LinuxSeccomp) error { + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + return nil +} + +func getErrno(errno string, def *uint) (*uint, error) { + if errno == "" { + return def, nil + } + v, err := strconv.ParseUint(errno, 10, 32) + if err == nil { + v2 := uint(v) + return &v2, nil + } + + v2, found := errnoArch[errno] + if !found { + return nil, fmt.Errorf("unknown errno %s", errno) + } + return &v2, nil +} + +func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig := &specs.LinuxSeccomp{} + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } + + if err := getArchitectures(config, newConfig); err != nil { + return nil, err + } + + if len(config.ArchMap) != 0 { + for _, a := range config.ArchMap { + seccompArch, ok := nativeToSeccomp[arch] + if ok { + if a.Arch == seccompArch { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) + for _, sa := range a.SubArches { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) + } + break + } + } + } + } + + newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) + + newConfig.DefaultErrnoRet, err = getErrno(config.DefaultErrno, config.DefaultErrnoRet) + if err != nil { + return nil, err + } + +Loop: + // Loop through all syscall blocks and convert them to libcontainer format after filtering them + for _, call := range config.Syscalls { + if len(call.Excludes.Arches) > 0 { + if inSlice(call.Excludes.Arches, arch) { + continue Loop + } + } + if len(call.Excludes.Caps) > 0 { + for _, c := range call.Excludes.Caps { + if rs != nil && rs.Process != nil && rs.Process.Capabilities != nil && inSlice(rs.Process.Capabilities.Bounding, c) { + continue Loop + } + } + } + if len(call.Includes.Arches) > 0 { + if !inSlice(call.Includes.Arches, arch) { + continue Loop + } + } + if len(call.Includes.Caps) > 0 { + for _, c := range call.Includes.Caps { + if rs != nil && rs.Process != nil && rs.Process.Capabilities != nil && !inSlice(rs.Process.Capabilities.Bounding, c) { + continue Loop + } + } + } + + if call.Name != "" && len(call.Names) != 0 { + return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") + } + + errno, err := getErrno(call.Errno, call.ErrnoRet) + if err != nil { + return nil, err + } + + if call.Name != "" { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, errno)) + } + + if len(call.Names) > 0 { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, errno)) + } + } + + return newConfig, nil +} 
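+
+// NOTE (editorial sketch, not part of the vendored upstream file): a minimal
+// caller of the loaders above; profileJSON and runtimeSpec are hypothetical
+// inputs supplied by the embedding application.
+//
+//	linuxSeccomp, err := seccomp.LoadProfileFromBytes(profileJSON, runtimeSpec)
+//	if err != nil {
+//		return err
+//	}
+//	// setupSeccomp returns a nil spec for empty profiles (no default action
+//	// and no syscalls), which callers should treat as "seccomp disabled".
+//	runtimeSpec.Linux.Seccomp = linuxSeccomp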
+
+func createSpecsSyscall(names []string, action Action, args []*Arg, errnoRet *uint) specs.LinuxSyscall {
+	newCall := specs.LinuxSyscall{
+		Names:    names,
+		Action:   specs.LinuxSeccompAction(action),
+		ErrnoRet: errnoRet,
+	}
+
+	// Loop through all the arguments of the syscall and convert them
+	for _, arg := range args {
+		newArg := specs.LinuxSeccompArg{
+			Index:    arg.Index,
+			Value:    arg.Value,
+			ValueTwo: arg.ValueTwo,
+			Op:       specs.LinuxSeccompOperator(arg.Op),
+		}
+
+		newCall.Args = append(newCall.Args, newArg)
+	}
+	return newCall
+}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+	return IsSupported()
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
new file mode 100644
index 00000000000..8b23ee2c04d
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
@@ -0,0 +1,46 @@
+// +build !linux !seccomp
+
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
+package seccomp
+
+import (
+	"errors"
+
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+var errNotSupported = errors.New("seccomp not enabled in this build")
+
+// LoadProfile returns an error on unsupported systems
+func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+	return nil, errNotSupported
+}
+
+// GetDefaultProfile returns an error on unsupported systems
+func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+	return nil, errNotSupported
+}
+
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+	return nil, errNotSupported
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+	return nil, errNotSupported
+}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+	return false
+}
+
+// IsSupported returns true if the system has been configured to support
+// seccomp.
+func IsSupported() bool {
+	return false
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go
new file mode 100644
index 00000000000..86e1b66bbe3
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go
@@ -0,0 +1,49 @@
+// +build linux,seccomp
+
+package seccomp
+
+import (
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+var (
+	supported bool
+	supOnce   sync.Once
+)
+
+// IsSupported returns true if the system has been configured to support
+// seccomp (including the check for CONFIG_SECCOMP_FILTER kernel option).
+func IsSupported() bool {
+	// Excerpts from prctl(2), section ERRORS:
+	//
+	// EACCES
+	// option is PR_SET_SECCOMP and arg2 is SECCOMP_MODE_FILTER, but
+	// the process does not have the CAP_SYS_ADMIN capability or has
+	// not set the no_new_privs attribute <...>.
+	// <...>
+	// EFAULT
+	// option is PR_SET_SECCOMP, arg2 is SECCOMP_MODE_FILTER, the
+	// system was built with CONFIG_SECCOMP_FILTER, and arg3 is an
+	// invalid address.
+	// <...>
+	// EINVAL
+	// option is PR_SET_SECCOMP or PR_GET_SECCOMP, and the kernel
+	// was not configured with CONFIG_SECCOMP.
+ // + // EINVAL + // option is PR_SET_SECCOMP, arg2 is SECCOMP_MODE_FILTER, + // and the kernel was not configured with CONFIG_SECCOMP_FILTER. + // + // + // Meaning, in case these kernel options are set (this is what we check + // for here), we will get some other error (most probably EACCES or + // EFAULT). IOW, EINVAL means "seccomp not supported", any other error + // means it is supported. + + supOnce.Do(func() { + supported = unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0) != unix.EINVAL + }) + return supported +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go new file mode 100644 index 00000000000..a8a9e9d4f7c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/types.go @@ -0,0 +1,117 @@ +package seccomp + +// SPDX-License-Identifier: Apache-2.0 + +// Copyright 2013-2018 Docker, Inc. + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + + // DefaultErrnoRet is obsolete, please use DefaultErrno + DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"` + DefaultErrno string `json:"defaultErrno,omitempty"` + + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent a specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchNative Arch = "SCMP_ARCH_NATIVE" + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" + ArchPARISC Arch = "SCMP_ARCH_PARISC" + ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" + ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + // ActKill results in termination of the thread that made the system call. + ActKill Action = "SCMP_ACT_KILL" + // ActKillProcess results in termination of the entire process. + ActKillProcess Action = "SCMP_ACT_KILL_PROCESS" + // ActKillThread kills the thread that violated the rule. It is the same as + // ActKill. All other threads from the same thread group will continue to + // execute. 
+ ActKillThread Action = "SCMP_ACT_KILL_THREAD" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" + ActLog Action = "SCMP_ACT_LOG" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` + // ErrnoRet is obsolete, please use Errno + ErrnoRet *uint `json:"errnoRet,omitempty"` + Errno string `json:"errno,omitempty"` +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go new file mode 100644 index 00000000000..1c5c4edc64e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go @@ -0,0 +1,29 @@ +// +build seccomp + +package seccomp + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// ValidateProfile does a basic validation for the provided seccomp profile +// string. +func ValidateProfile(content string) error { + profile := &Seccomp{} + if err := json.Unmarshal([]byte(content), &profile); err != nil { + return errors.Wrap(err, "decoding seccomp profile") + } + + spec, err := setupSeccomp(profile, nil) + if err != nil { + return errors.Wrap(err, "create seccomp spec") + } + + if _, err := BuildFilter(spec); err != nil { + return errors.Wrap(err, "build seccomp filter") + } + + return nil +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_common.go b/vendor/github.com/containers/common/pkg/signal/signal_common.go new file mode 100644 index 00000000000..7c266290945 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_common.go @@ -0,0 +1,41 @@ +package signal + +import ( + "fmt" + "strconv" + "strings" + "syscall" +) + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("invalid signal: %s", rawSignal) + } + return sig, nil +} + +// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input +// can be a name or number representation i.e. 
"KILL" "9" +func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) { + basename := strings.TrimPrefix(rawSignal, "-") + s, err := ParseSignal(basename) + if err == nil { + return s, nil + } + for k, v := range signalMap { + if strings.EqualFold(k, basename) { + return v, nil + } + } + return -1, fmt.Errorf("invalid signal: %s", basename) +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go new file mode 100644 index 00000000000..305b9d21f41 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go @@ -0,0 +1,108 @@ +// +build linux +// +build !mips,!mipsle,!mips64,!mips64le + +// Signal handling for Linux only. +package signal + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows +) + +// signalMap is a map of Linux signals. +var signalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(signalMap)) + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. 
+func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go new file mode 100644 index 00000000000..45c9d5af1e6 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go @@ -0,0 +1,108 @@ +// +build linux +// +build mips mipsle mips64 mips64le + +// Special signal handling for mips architecture +package signal + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 127 + + SIGWINCH = syscall.SIGWINCH +) + +// signalMap is a map of Linux signals. +var signalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "EMT": unix.SIGEMT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(signalMap)) + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000000..9d1733c02d5 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go @@ -0,0 +1,99 @@ +// +build !linux + +// Signal handling for Linux only. +package signal + +import ( + "os" + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.Signal(0xff) +) + +// signalMap is a map of Linux signals. 
+// These constants are sourced from the Linux version of golang.org/x/sys/unix +// (I don't see much risk of this changing). +// This should work as long as Podman only runs containers on Linux, which seems +// a safe assumption for now. +var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.Signal(0x6), + "ALRM": syscall.Signal(0xe), + "BUS": syscall.Signal(0x7), + "CHLD": syscall.Signal(0x11), + "CLD": syscall.Signal(0x11), + "CONT": syscall.Signal(0x12), + "FPE": syscall.Signal(0x8), + "HUP": syscall.Signal(0x1), + "ILL": syscall.Signal(0x4), + "INT": syscall.Signal(0x2), + "IO": syscall.Signal(0x1d), + "IOT": syscall.Signal(0x6), + "KILL": syscall.Signal(0x9), + "PIPE": syscall.Signal(0xd), + "POLL": syscall.Signal(0x1d), + "PROF": syscall.Signal(0x1b), + "PWR": syscall.Signal(0x1e), + "QUIT": syscall.Signal(0x3), + "SEGV": syscall.Signal(0xb), + "STKFLT": syscall.Signal(0x10), + "STOP": syscall.Signal(0x13), + "SYS": syscall.Signal(0x1f), + "TERM": syscall.Signal(0xf), + "TRAP": syscall.Signal(0x5), + "TSTP": syscall.Signal(0x14), + "TTIN": syscall.Signal(0x15), + "TTOU": syscall.Signal(0x16), + "URG": syscall.Signal(0x17), + "USR1": syscall.Signal(0xa), + "USR2": syscall.Signal(0xc), + "VTALRM": syscall.Signal(0x1a), + "WINCH": syscall.Signal(0x1c), + "XCPU": syscall.Signal(0x18), + "XFSZ": syscall.Signal(0x19), + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} + +// StopCatch stops catching the signals and closes the specified channel. 
+func StopCatch(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} diff --git a/vendor/github.com/containers/common/pkg/subscriptions/mounts.conf b/vendor/github.com/containers/common/pkg/subscriptions/mounts.conf new file mode 100644 index 00000000000..b7cde9d8a3b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/subscriptions/mounts.conf @@ -0,0 +1 @@ +/usr/share/rhel/secrets:/run/secrets diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go new file mode 100644 index 00000000000..3c0d2b237d2 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -0,0 +1,386 @@ +package subscriptions + +import ( + "bufio" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containers/common/pkg/umask" + "github.com/containers/storage/pkg/idtools" + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // DefaultMountsFile holds the default mount paths in the form + // "host_path:container_path" + DefaultMountsFile = "/usr/share/containers/mounts.conf" + // OverrideMountsFile holds the default mount paths in the form + // "host_path:container_path" overridden by the user + OverrideMountsFile = "/etc/containers/mounts.conf" + // UserOverrideMountsFile holds the default mount paths in the form + // "host_path:container_path" overridden by the rootless user + UserOverrideMountsFile = filepath.Join(os.Getenv("HOME"), ".config/containers/mounts.conf") +) + +// subscriptionData stores the name of the file and the content read from it +type subscriptionData struct { + name string + data []byte + mode os.FileMode + dirMode os.FileMode +} + +// saveTo saves subscription data to given directory +func (s subscriptionData) saveTo(dir string) error { + path := filepath.Join(dir, s.name) + if err := os.MkdirAll(filepath.Dir(path), s.dirMode); err != nil { + return err + } + return ioutil.WriteFile(path, s.data, s.mode) +} + +func readAll(root, prefix string, parentMode os.FileMode) ([]subscriptionData, error) { + path := filepath.Join(root, prefix) + + data := []subscriptionData{} + + files, err := ioutil.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + return data, nil + } + + return nil, err + } + + for _, f := range files { + fileData, err := readFileOrDir(root, filepath.Join(prefix, f.Name()), parentMode) + if err != nil { + // If the file did not exist, might be a dangling symlink + // Ignore the error + if os.IsNotExist(err) { + continue + } + return nil, err + } + data = append(data, fileData...) 
+    }
+
+    return data, nil
+}
+
+func readFileOrDir(root, name string, parentMode os.FileMode) ([]subscriptionData, error) {
+    path := filepath.Join(root, name)
+
+    s, err := os.Stat(path)
+    if err != nil {
+        return nil, err
+    }
+
+    if s.IsDir() {
+        dirData, err := readAll(root, name, s.Mode())
+        if err != nil {
+            return nil, err
+        }
+        return dirData, nil
+    }
+    bytes, err := ioutil.ReadFile(path)
+    if err != nil {
+        return nil, err
+    }
+    return []subscriptionData{{
+        name:    name,
+        data:    bytes,
+        mode:    s.Mode(),
+        dirMode: parentMode,
+    }}, nil
+}
+
+func getHostSubscriptionData(hostDir string, mode os.FileMode) ([]subscriptionData, error) {
+    var allSubscriptions []subscriptionData
+    hostSubscriptions, err := readAll(hostDir, "", mode)
+    if err != nil {
+        return nil, errors.Wrapf(err, "failed to read subscriptions from %q", hostDir)
+    }
+    return append(allSubscriptions, hostSubscriptions...), nil
+}
+
+func getMounts(filePath string) []string {
+    file, err := os.Open(filePath)
+    if err != nil {
+        // This is expected on most systems
+        logrus.Debugf("File %q not found, skipping...", filePath)
+        return nil
+    }
+    defer file.Close()
+    scanner := bufio.NewScanner(file)
+    if err = scanner.Err(); err != nil {
+        logrus.Errorf("Reading file %q, %v skipping...", filePath, err)
+        return nil
+    }
+    var mounts []string
+    for scanner.Scan() {
+        if strings.HasPrefix(strings.TrimSpace(scanner.Text()), "/") {
+            mounts = append(mounts, scanner.Text())
+        } else {
+            logrus.Debugf("Skipping unrecognized mount in %v: %q",
+                filePath, scanner.Text())
+        }
+    }
+    return mounts
+}
+
+// getMountsMap separates the host:container paths
+func getMountsMap(path string) (string, string, error) { //nolint
+    arr := strings.SplitN(path, ":", 2)
+    switch len(arr) {
+    case 1:
+        return arr[0], arr[0], nil
+    case 2:
+        return arr[0], arr[1], nil
+    }
+    return "", "", errors.Errorf("unable to get host and container dir from path: %s", path)
+}
+
+// MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem
+// mountLabel: MAC/SELinux label for container content
+// containerRunDir: Private data for storing subscriptions on the host mounted in container.
+// mountFile: Additional mount points required for the container.
+// mountPoint: Container image mountpoint, or the directory from the host's perspective that
+//             corresponds to `/` in the container.
+// uid: to assign to content created for subscriptions
+// gid: to assign to content created for subscriptions
+// rootless: indicates whether container is running in rootless mode
+// disableFips: indicates whether system should ignore fips mode
+func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount {
+    var (
+        subscriptionMounts []rspec.Mount
+        mountFiles         []string
+    )
+    // Add subscriptions from paths given in the mounts.conf files
+    // mountFile will have a value if the hidden --default-mounts-file flag is set
+    // Note: the hidden flag is intended for testing purposes only
+    if mountFile == "" {
+        mountFiles = append(mountFiles, []string{OverrideMountsFile, DefaultMountsFile}...)
+        if rootless {
+            mountFiles = append([]string{UserOverrideMountsFile}, mountFiles...)
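+            // (Precedence note: the rootless user's override file is consulted
+            // first, then the system override file, then the default file; the
+            // loop below uses the first one that exists.)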
+ } + } else { + mountFiles = append(mountFiles, mountFile) + } + for _, file := range mountFiles { + if _, err := os.Stat(file); err == nil { + mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid) + if err != nil { + logrus.Warnf("Failed to mount subscriptions, skipping entry in %s: %v", file, err) + } + subscriptionMounts = mounts + break + } + } + + // Only add FIPS subscription mount if disableFips=false + if disableFips { + return subscriptionMounts + } + // Add FIPS mode subscription if /etc/system-fips exists on the host + _, err := os.Stat("/etc/system-fips") + switch { + case err == nil: + if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { + logrus.Errorf("Adding FIPS mode subscription to container: %v", err) + } + case os.IsNotExist(err): + logrus.Debug("/etc/system-fips does not exist on host, not mounting FIPS mode subscription") + default: + logrus.Errorf("stat /etc/system-fips failed for FIPS mode subscription: %v", err) + } + return subscriptionMounts +} + +func rchown(chowndir string, uid, gid int) error { + return filepath.Walk(chowndir, func(filePath string, f os.FileInfo, err error) error { + return os.Lchown(filePath, uid, gid) + }) +} + +// addSubscriptionsFromMountsFile copies the contents of host directory to container directory +// and returns a list of mounts +func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) { + var mounts []rspec.Mount + defaultMountsPaths := getMounts(filePath) + for _, path := range defaultMountsPaths { + hostDirOrFile, ctrDirOrFile, err := getMountsMap(path) + if err != nil { + return nil, err + } + // skip if the hostDirOrFile path doesn't exist + fileInfo, err := os.Stat(hostDirOrFile) + if err != nil { + if os.IsNotExist(err) { + logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath) + continue + } + return nil, err + } + + ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile) + + // In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost + _, err = os.Stat(ctrDirOrFileOnHost) + if os.IsNotExist(err) { + + hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile) + if err != nil { + return nil, err + } + + // Don't let the umask have any influence on the file and directory creation + oldUmask := umask.Set(0) + defer umask.Set(oldUmask) + + switch mode := fileInfo.Mode(); { + case mode.IsDir(): + if err = os.MkdirAll(ctrDirOrFileOnHost, mode.Perm()); err != nil { + return nil, errors.Wrap(err, "making container directory") + } + data, err := getHostSubscriptionData(hostDirOrFile, mode.Perm()) + if err != nil { + return nil, errors.Wrap(err, "getting host subscription data") + } + for _, s := range data { + if err := s.saveTo(ctrDirOrFileOnHost); err != nil { + return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOrFileOnHost) + } + } + case mode.IsRegular(): + data, err := readFileOrDir("", hostDirOrFile, mode.Perm()) + if err != nil { + return nil, err + + } + for _, s := range data { + if err := os.MkdirAll(filepath.Dir(ctrDirOrFileOnHost), s.dirMode); err != nil { + return nil, err + } + if err := ioutil.WriteFile(ctrDirOrFileOnHost, s.data, s.mode); err != nil { + return nil, errors.Wrap(err, "saving data to container filesystem") + } + } + default: + return nil, errors.Errorf("unsupported file type for: %q", hostDirOrFile) + 
            }
+
+            err = label.Relabel(ctrDirOrFileOnHost, mountLabel, false)
+            if err != nil {
+                return nil, errors.Wrap(err, "error applying correct labels")
+            }
+            if uid != 0 || gid != 0 {
+                if err := rchown(ctrDirOrFileOnHost, uid, gid); err != nil {
+                    return nil, err
+                }
+            }
+        } else if err != nil {
+            return nil, err
+        }
+
+        m := rspec.Mount{
+            Source:      ctrDirOrFileOnHost,
+            Destination: ctrDirOrFile,
+            Type:        "bind",
+            Options:     []string{"bind", "rprivate"},
+        }
+
+        mounts = append(mounts, m)
+    }
+    return mounts, nil
+}
+
+// addFIPSModeSubscription adds mounts to the `mounts` slice that are needed for the container to run openssl in FIPS mode
+// (i.e., be FIPS compliant).
+// It should only be called if /etc/system-fips exists on the host.
+// It primarily does two things:
+// - creates /run/secrets/system-fips in the container root filesystem, and adds it to the `mounts` slice.
+// - if the container image provides `/usr/share/crypto-policies/back-ends/FIPS`, bind-mounts it over
+//   `/etc/crypto-policies/back-ends` inside the container.
+// Using the policy files shipped inside the container avoids policy incompatibility between the container and the host.
+func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error {
+    subscriptionsDir := "/run/secrets"
+    ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir)
+    if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) {
+        if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint
+            return err
+        }
+        if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil {
+            return errors.Wrapf(err, "applying correct labels on %q", ctrDirOnHost)
+        }
+    }
+    fipsFile := filepath.Join(ctrDirOnHost, "system-fips")
+    // In the event of restart, it is possible for the FIPS mode file to already exist
+    if _, err := os.Stat(fipsFile); os.IsNotExist(err) {
+        file, err := os.Create(fipsFile)
+        if err != nil {
+            return errors.Wrap(err, "creating system-fips file in container for FIPS mode")
+        }
+        file.Close()
+    }
+
+    if !mountExists(*mounts, subscriptionsDir) {
+        m := rspec.Mount{
+            Source:      ctrDirOnHost,
+            Destination: subscriptionsDir,
+            Type:        "bind",
+            Options:     []string{"bind", "rprivate"},
+        }
+        *mounts = append(*mounts, m)
+    }
+
+    srcBackendDir := "/usr/share/crypto-policies/back-ends/FIPS"
+    destDir := "/etc/crypto-policies/back-ends"
+    srcOnHost := filepath.Join(mountPoint, srcBackendDir)
+    if _, err := os.Stat(srcOnHost); err != nil {
+        if os.IsNotExist(err) {
+            return nil
+        }
+        return errors.Wrap(err, "FIPS Backend directory")
+    }
+
+    if !mountExists(*mounts, destDir) {
+        m := rspec.Mount{
+            Source:      srcOnHost,
+            Destination: destDir,
+            Type:        "bind",
+            Options:     []string{"bind", "rprivate"},
+        }
+        *mounts = append(*mounts, m)
+    }
+    return nil
+}
+
+// mountExists checks if a mount already exists in the spec
+func mountExists(mounts []rspec.Mount, dest string) bool {
+    for _, mount := range mounts {
+        if mount.Destination == dest {
+            return true
+        }
+    }
+    return false
+}
+
+// resolveSymbolicLink resolves a possible symlink path. If the path is a symlink, it returns the resolved
+// path; if not, it returns the original path.
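+// For example (illustrative): if /etc/secret-dir is a symlink to /data/secrets,
+// resolveSymbolicLink("/etc/secret-dir") returns ("/data/secrets", nil).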
+func resolveSymbolicLink(path string) (string, error) { + info, err := os.Lstat(path) + if err != nil { + return "", err + } + if info.Mode()&os.ModeSymlink != os.ModeSymlink { + return path, nil + } + return filepath.EvalSymlinks(path) +} diff --git a/vendor/github.com/containers/common/pkg/supplemented/errors.go b/vendor/github.com/containers/common/pkg/supplemented/errors.go new file mode 100644 index 00000000000..a031951f152 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/supplemented/errors.go @@ -0,0 +1,17 @@ +package supplemented + +import ( + "errors" + + "github.com/containers/common/pkg/manifests" +) + +var ( + // ErrDigestNotFound is returned when we look for an image instance + // with a particular digest in a list or index, and fail to find it. + ErrDigestNotFound = manifests.ErrDigestNotFound + // ErrBlobNotFound is returned when try to figure out which supplemental + // image we should ask for a blob with the specified characteristics, + // based on the information in each of the supplemental images' manifests. + ErrBlobNotFound = errors.New("location of blob could not be determined") +) diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go new file mode 100644 index 00000000000..196176a1c6c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -0,0 +1,402 @@ +package supplemented + +import ( + "container/list" + "context" + "io" + + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + multierror "github.com/hashicorp/go-multierror" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// supplementedImageReference groups multiple references together. +type supplementedImageReference struct { + types.ImageReference + references []types.ImageReference + multiple cp.ImageListSelection + instances []digest.Digest +} + +// supplementedImageSource represents an image, plus all of the blobs of other images. +type supplementedImageSource struct { + types.ImageSource + reference types.ImageReference + manifest []byte // The manifest list or image index. + manifestType string // The MIME type of the manifest list or image index. + sourceDefaultInstances map[types.ImageSource]digest.Digest // The default manifest instances of open ImageSource objects. + sourceInstancesByInstance map[digest.Digest]types.ImageSource // A map from manifest instance digests to open ImageSource objects. + instancesByBlobDigest map[digest.Digest]digest.Digest // A map from blob digests to manifest instance digests. +} + +// Reference groups one reference and some number of additional references +// together as a group. The first reference's default instance will be treated +// as the default instance of the resulting reference, with the other +// references' instances made available as instances for their respective +// digests. 
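+//
+// A minimal usage sketch (illustrative; amd64Ref and arm64Ref are hypothetical
+// image references supplied by the caller):
+//
+//	combined := Reference(amd64Ref, []types.ImageReference{arm64Ref}, cp.CopyAllImages, nil)
+//	src, err := combined.NewImageSource(ctx, sys)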
+func Reference(ref types.ImageReference, supplemental []types.ImageReference, multiple cp.ImageListSelection, instances []digest.Digest) types.ImageReference { + if len(instances) > 0 { + i := make([]digest.Digest, len(instances)) + copy(i, instances) + instances = i + } + return &supplementedImageReference{ + ImageReference: ref, + references: append([]types.ImageReference{}, supplemental...), + multiple: multiple, + instances: instances, + } +} + +// NewImage returns a new higher-level view of the image. +func (s *supplementedImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := s.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error building a new Image using an ImageSource") + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource opens the referenced images, scans their manifests for +// instances, and builds mappings from each blob mentioned in them to their +// instances. +func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (iss types.ImageSource, err error) { + sources := make(map[digest.Digest]types.ImageSource) + defaultInstances := make(map[types.ImageSource]digest.Digest) + instances := make(map[digest.Digest]digest.Digest) + var sis *supplementedImageSource + + // Open the default instance for reading. + top, err := s.ImageReference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(s.ImageReference)) + } + + defer func() { + if err != nil { + if iss != nil { + // The composite source has been created. Use its Close method. + if err2 := iss.Close(); err2 != nil { + logrus.Errorf("Opening image: %v", err2) + } + } else if top != nil { + // The composite source has not been created, but the top was already opened. Close it. + if err2 := top.Close(); err2 != nil { + logrus.Errorf("Closing image: %v", err2) + } + } + } + }() + + var addSingle, addMulti func(manifestBytes []byte, manifestType string, src types.ImageSource) error + type manifestToRead struct { + src types.ImageSource + instance *digest.Digest + } + manifestsToRead := list.New() + + addSingle = func(manifestBytes []byte, manifestType string, src types.ImageSource) error { + // Mark this instance as being associated with this ImageSource. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing digest over manifest %q", string(manifestBytes)) + } + sources[manifestDigest] = src + + // Parse the manifest as a single image. + man, err := manifest.FromBlob(manifestBytes, manifestType) + if err != nil { + return errors.Wrapf(err, "error parsing manifest %q", string(manifestBytes)) + } + + // Log the config blob's digest and the blobs of its layers as associated with this manifest. + config := man.ConfigInfo() + if config.Digest != "" { + instances[config.Digest] = manifestDigest + logrus.Debugf("blob %q belongs to %q", config.Digest, manifestDigest) + } + + layers := man.LayerInfos() + for _, layer := range layers { + instances[layer.Digest] = manifestDigest + logrus.Debugf("layer %q belongs to %q", layer.Digest, manifestDigest) + } + + return nil + } + + addMulti = func(manifestBytes []byte, manifestType string, src types.ImageSource) error { + // Mark this instance as being associated with this ImageSource. 
+ manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing manifest digest") + } + sources[manifestDigest] = src + + // Parse the manifest as a list of images. + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(manifestBytes), manifestType) + } + + // Figure out which of its instances we want to look at. + var chaseInstances []digest.Digest + switch s.multiple { + case cp.CopySystemImage: + instance, err := list.ChooseInstance(sys) + if err != nil { + return errors.Wrapf(err, "error selecting appropriate instance from list") + } + chaseInstances = []digest.Digest{instance} + case cp.CopySpecificImages: + chaseInstances = s.instances + case cp.CopyAllImages: + chaseInstances = list.Instances() + } + + // Queue these manifest instances for reading from this + // ImageSource later, if we don't stumble across them somewhere + // else first. + for _, instanceIterator := range chaseInstances { + instance := instanceIterator + next := &manifestToRead{ + src: src, + instance: &instance, + } + if src == top { + // Prefer any other source. + manifestsToRead.PushBack(next) + } else { + // Prefer this source over the first ("main") one. + manifestsToRead.PushFront(next) + } + } + return nil + } + + visitedReferences := make(map[types.ImageReference]struct{}) + for i, ref := range append([]types.ImageReference{s.ImageReference}, s.references...) { + if _, visited := visitedReferences[ref]; visited { + continue + } + visitedReferences[ref] = struct{}{} + + // Open this image for reading. + var src types.ImageSource + if ref == s.ImageReference { + src = top + } else { + src, err = ref.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(ref)) + } + } + + // Read the default manifest for the image. + manifestBytes, manifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error reading default manifest from image %q", transports.ImageName(ref)) + } + + // If this is the first image, mark it as our starting point. + if i == 0 { + sources[""] = src + + sis = &supplementedImageSource{ + ImageSource: top, + reference: s, + manifest: manifestBytes, + manifestType: manifestType, + sourceDefaultInstances: defaultInstances, + sourceInstancesByInstance: sources, + instancesByBlobDigest: instances, + } + iss = sis + } + + // Record the digest of the ImageSource's default instance's manifest. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "error computing digest of manifest from image %q", transports.ImageName(ref)) + } + sis.sourceDefaultInstances[src] = manifestDigest + + // If the ImageSource's default manifest is a list, parse each of its instances. + if manifest.MIMETypeIsMultiImage(manifestType) { + if err = addMulti(manifestBytes, manifestType, src); err != nil { + return nil, errors.Wrapf(err, "error adding multi-image %q", transports.ImageName(ref)) + } + } else { + if err = addSingle(manifestBytes, manifestType, src); err != nil { + return nil, errors.Wrapf(err, "error adding single image %q", transports.ImageName(ref)) + } + } + } + + // Parse the rest of the instances. 
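+    // (The queue behaves as a worklist: addMulti pushes instances from
+    // supplemental sources to the front so they are preferred over the
+    // top-level source, and digests already recorded in sources are skipped.)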
+    for manifestsToRead.Front() != nil {
+        front := manifestsToRead.Front()
+        value := front.Value
+        manifestToRead, ok := value.(*manifestToRead)
+        if !ok {
+            panic("bug: wrong type looking for *manifestToRead in list?")
+        }
+        manifestsToRead.Remove(front)
+
+        // If we already read this manifest, no need to read it again.
+        if _, alreadyRead := sources[*manifestToRead.instance]; alreadyRead {
+            continue
+        }
+
+        // Read the instance's manifest.
+        manifestBytes, manifestType, err := manifestToRead.src.GetManifest(ctx, manifestToRead.instance)
+        if err != nil {
+            // if errors.Cause(err) == storage.ErrImageUnknown || os.IsNotExist(errors.Cause(err)) {
+            //	// Trust that we either don't need it, or that it's in another reference.
+            //	continue
+            // }
+            return nil, errors.Wrapf(err, "error reading manifest for instance %q", manifestToRead.instance)
+        }
+
+        if manifest.MIMETypeIsMultiImage(manifestType) {
+            // Add the list's contents.
+            if err = addMulti(manifestBytes, manifestType, manifestToRead.src); err != nil {
+                return nil, errors.Wrapf(err, "error adding multi-image instance %q", manifestToRead.instance)
+            }
+        } else {
+            // Add the single image's contents.
+            if err = addSingle(manifestBytes, manifestType, manifestToRead.src); err != nil {
+                return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance)
+            }
+        }
+    }
+
+    return iss, nil
+}
+
+func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+    return errors.Errorf("deletion of images not implemented")
+}
+
+func (s *supplementedImageSource) Close() error {
+    var returnErr *multierror.Error
+    closed := make(map[types.ImageSource]struct{})
+    for _, sourceInstance := range s.sourceInstancesByInstance {
+        if _, closed := closed[sourceInstance]; closed {
+            continue
+        }
+        if err := sourceInstance.Close(); err != nil {
+            returnErr = multierror.Append(returnErr, err)
+        }
+        closed[sourceInstance] = struct{}{}
+    }
+    if returnErr == nil {
+        return nil
+    }
+    return returnErr.ErrorOrNil()
+}
+
+func (s *supplementedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+    requestInstanceDigest := instanceDigest
+    if instanceDigest == nil {
+        return s.manifest, s.manifestType, nil
+    }
+    if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok {
+        if *instanceDigest == s.sourceDefaultInstances[sourceInstance] {
+            requestInstanceDigest = nil
+        }
+        return sourceInstance.GetManifest(ctx, requestInstanceDigest)
+    }
+    return nil, "", errors.Wrapf(ErrDigestNotFound, "error getting manifest for digest %q", *instanceDigest)
+}
+
+func (s *supplementedImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, bic types.BlobInfoCache) (io.ReadCloser, int64, error) {
+    sourceInstance, ok := s.instancesByBlobDigest[blob.Digest]
+    if !ok {
+        return nil, -1, errors.Wrapf(ErrBlobNotFound, "error locating blob %q in known instances", blob.Digest)
+    }
+    src, ok := s.sourceInstancesByInstance[sourceInstance]
+    if !ok {
+        return nil, -1, errors.Wrapf(ErrDigestNotFound, "error getting image source for instance %q", sourceInstance)
+    }
+    return src.GetBlob(ctx, blob, bic)
+}
+
+func (s *supplementedImageSource) HasThreadSafeGetBlob() bool {
+    checked := make(map[types.ImageSource]struct{})
+    for _, sourceInstance := range s.sourceInstancesByInstance {
+        if _, checked := checked[sourceInstance]; checked {
+            continue
+        }
+        if !sourceInstance.HasThreadSafeGetBlob() {
+            return false
+        }
+        checked[sourceInstance] = struct{}{}
+    }
+    return
true +} + +func (s *supplementedImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var ( + src types.ImageSource + digest digest.Digest + ) + requestInstanceDigest := instanceDigest + if instanceDigest == nil { + if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok { + src = sourceInstance + } + } else { + digest = *instanceDigest + if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok { + src = sourceInstance + } + if *instanceDigest == s.sourceDefaultInstances[src] { + requestInstanceDigest = nil + } + } + if src != nil { + return src.GetSignatures(ctx, requestInstanceDigest) + } + return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to read signatures", digest) +} + +func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + var src types.ImageSource + requestInstanceDigest := instanceDigest + errMsgDigest := "" + if instanceDigest == nil { + if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok { + src = sourceInstance + } + } else { + errMsgDigest = string(*instanceDigest) + if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok { + src = sourceInstance + } + if *instanceDigest == s.sourceDefaultInstances[src] { + requestInstanceDigest = nil + } + } + if src != nil { + blobInfos, err := src.LayerInfosForCopy(ctx, requestInstanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer infos for copy from instance %q", instanceDigest) + } + var manifestDigest digest.Digest + if instanceDigest != nil { + manifestDigest = *instanceDigest + } + for _, blobInfo := range blobInfos { + s.instancesByBlobDigest[blobInfo.Digest] = manifestDigest + } + return blobInfos, nil + } + return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", errMsgDigest) +} diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go new file mode 100644 index 00000000000..ce2cb64f28b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go @@ -0,0 +1,131 @@ +package timetype + +// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
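+// For example (illustrative): GetTimestamp("10m", ref) returns
+// strconv.FormatInt(ref.Add(-10*time.Minute).Unix(), 10), while an RFC3339
+// input such as "2006-01-02T15:04:05Z" yields a "<seconds>.<nanoseconds>" string.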
+func GetTimestamp(value string, reference time.Time) (string, error) {
+    if d, err := time.ParseDuration(value); value != "0" && err == nil {
+        return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+    }
+
+    var format string
+    // If the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation.
+    parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+    if strings.Contains(value, ".") { // nolint:gocritic
+        if parseInLocation {
+            format = rFC3339NanoLocal
+        } else {
+            format = time.RFC3339Nano
+        }
+    } else if strings.Contains(value, "T") {
+        // we want the number of colons in the T portion of the timestamp
+        tcolons := strings.Count(value, ":")
+        // if parseInLocation is off and we have a +/- zone offset (not Z) then
+        // there will be an extra colon in the input for the tz offset; subtract that
+        // colon from the tcolons count
+        if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+            tcolons--
+        }
+        if parseInLocation {
+            switch tcolons {
+            case 0:
+                format = "2006-01-02T15"
+            case 1:
+                format = "2006-01-02T15:04"
+            default:
+                format = rFC3339Local
+            }
+        } else {
+            switch tcolons {
+            case 0:
+                format = "2006-01-02T15Z07:00"
+            case 1:
+                format = "2006-01-02T15:04Z07:00"
+            default:
+                format = time.RFC3339
+            }
+        }
+    } else if parseInLocation {
+        format = dateLocal
+    } else {
+        format = dateWithZone
+    }
+
+    var t time.Time
+    var err error
+
+    if parseInLocation {
+        t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+    } else {
+        t, err = time.Parse(format, value)
+    }
+
+    if err != nil {
+        // if there is a `-` then it's an RFC3339 like timestamp
+        if strings.Contains(value, "-") {
+            return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+        }
+        if _, _, err := parseTimestamp(value); err != nil {
+            return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+        }
+        return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+    }
+
+    return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
+// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
+// if the incoming nanosecond portion is longer or shorter than 9 digits it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable.
For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) { + if value == "" { + return def, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go new file mode 100644 index 00000000000..bb589f7ac04 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go @@ -0,0 +1,20 @@ +// +build linux darwin + +package umask + +import ( + "syscall" + + "github.com/sirupsen/logrus" +) + +func Check() { + oldUmask := syscall.Umask(0022) //nolint + if (oldUmask & ^0022) != 0 { + logrus.Debugf("umask value too restrictive. Forcing it to 022") + } +} + +func Set(value int) int { + return syscall.Umask(value) +} diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go new file mode 100644 index 00000000000..9041d5f2098 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin + +package umask + +func Check() {} + +func Set(int) int { return 0 } diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go new file mode 100644 index 00000000000..98890a686f8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -0,0 +1,24 @@ +package util + +import "regexp" + +// StringInSlice determines if a string is in a string slice, returns bool +func StringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool +func StringMatchRegexSlice(s string, re []string) bool { + for _, r := range re { + m, err := regexp.MatchString(r, s) + if err == nil && m { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go new file mode 100644 index 00000000000..422e28742bc --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -0,0 +1,80 @@ +// +build linux darwin + +package util + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + rootlessRuntimeDirOnce sync.Once + rootlessRuntimeDir string +) + +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { + var rootlessRuntimeDirError error + + rootlessRuntimeDirOnce.Do(func() { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir != "" { + st, err := 
os.Stat(runtimeDir) + if err != nil { + rootlessRuntimeDirError = err + return + } + if int(st.Sys().(*syscall.Stat_t).Uid) != os.Geteuid() { + rootlessRuntimeDirError = fmt.Errorf("XDG_RUNTIME_DIR directory %q is not owned by the current user", runtimeDir) + return + } + } + uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) + if runtimeDir == "" { + tmpDir := filepath.Join("/run", "user", uid) + if err := os.MkdirAll(tmpDir, 0700); err != nil { + logrus.Debugf("unable to make temp dir: %v", err) + } + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) + if err := os.MkdirAll(tmpDir, 0700); err != nil { + logrus.Debugf("unable to make temp dir %v", err) + } + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + home := os.Getenv("HOME") + if home == "" { + rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty") + return + } + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessRuntimeDirError = errors.Wrap(err, "cannot resolve home") + return + } + runtimeDir = filepath.Join(resolvedHome, "rundir") + } + rootlessRuntimeDir = runtimeDir + }) + + if rootlessRuntimeDirError != nil { + return "", rootlessRuntimeDirError + } + return rootlessRuntimeDir, nil +} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go new file mode 100644 index 00000000000..2add712f19d --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package util + +import ( + "github.com/pkg/errors" +) + +// getRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { + return "", errors.New("this function is not implemented for windows") +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go new file mode 100644 index 00000000000..2088e955295 --- /dev/null +++ b/vendor/github.com/containers/common/version/version.go @@ -0,0 +1,4 @@ +package version + +// Version is the version of the build. +const Version = "0.47.5" diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE new file mode 100644 index 00000000000..95356353060 --- /dev/null +++ b/vendor/github.com/containers/image/v5/LICENSE @@ -0,0 +1,189 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
new file mode 100644
index 00000000000..512e643b9cc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -0,0 +1,1782 @@
+package copy
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "reflect"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/containers/image/v5/docker/reference"
+    "github.com/containers/image/v5/image"
+    internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+    "github.com/containers/image/v5/internal/pkg/platform"
+    internalTypes "github.com/containers/image/v5/internal/types"
+    "github.com/containers/image/v5/manifest"
+    "github.com/containers/image/v5/pkg/blobinfocache"
+    "github.com/containers/image/v5/pkg/compression"
+    compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+    "github.com/containers/image/v5/signature"
+    "github.com/containers/image/v5/transports"
+    "github.com/containers/image/v5/types"
+    "github.com/containers/ocicrypt"
+    encconfig "github.com/containers/ocicrypt/config"
+    digest "github.com/opencontainers/go-digest"
+    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+    "github.com/vbauerster/mpb/v7"
+    "github.com/vbauerster/mpb/v7/decor"
+    "golang.org/x/sync/semaphore"
+    "golang.org/x/term"
+)
+
+var (
+    // ErrDecryptParamsMissing is returned if necessary decryption parameters are missing
+    ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
+
+    // maxParallelDownloads is used to limit the maximum number of parallel
+    // downloads. Let's follow Firefox by limiting it to 6.
+    maxParallelDownloads = uint(6)
+
+    // defaultCompressionFormat is used if the destination transport requests
+    // compression, and the user does not explicitly instruct us to use an algorithm.
+    defaultCompressionFormat = &compression.Gzip
+)
+
+// compressionBufferSize is the buffer size used to compress a blob
+var compressionBufferSize = 1048576
+
+// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+// using the algorithm that the media type says it should be compressed with
+var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+    imgspecv1.MediaTypeImageLayerGzip:      &compression.Gzip,
+    imgspecv1.MediaTypeImageLayerZstd:      &compression.Zstd,
+    manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
+}
+
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+type copier struct {
+    dest              types.ImageDestination
+    rawSource         types.ImageSource
+    reportWriter      io.Writer
+    progressOutput    io.Writer
+    progressInterval  time.Duration
+    progress          chan types.ProgressProperties
+    blobInfoCache     internalblobinfocache.BlobInfoCache2
+    compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+    compressionLevel              *int
+    ociDecryptConfig              *encconfig.DecryptConfig
+    ociEncryptConfig              *encconfig.EncryptConfig
+    concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
+    downloadForeignLayers         bool
+}
+
+// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
+type imageCopier struct {
+    c                          *copier
+    manifestUpdates            *types.ManifestUpdateOptions
+    src                        types.Image
+    diffIDsAreNeeded           bool
+    cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+    canSubstituteBlobs         bool
+    ociEncryptLayers           *[]int
+}
+
+const (
+    // CopySystemImage is the default value which, when set in
+    // Options.ImageListSelection, indicates that the caller expects only one
+    // image to be copied, so if the source reference refers to a list of
+    // images, one that matches the current system will be selected.
+    CopySystemImage ImageListSelection = iota
+    // CopyAllImages is a value which, when set in Options.ImageListSelection,
+    // indicates that the caller expects to copy multiple images, and if
+    // the source reference refers to a list, that the list and every image
+    // to which it refers will be copied. If the source reference refers
+    // to a list and the target reference can not accept lists, an error
+    // should be returned.
+    CopyAllImages
+    // CopySpecificImages is a value which, when set in
+    // Options.ImageListSelection, indicates that the caller expects the
+    // source reference to be either a single image or a list of images,
+    // and if the source reference is a list, wants only specific instances
+    // from it copied (or none of them, if the list of instances to copy is
+    // empty), along with the list itself. If the target reference can
+    // only accept one image (i.e., it cannot accept lists), an error
+    // should be returned.
+    CopySpecificImages
+)
+
+// ImageListSelection is one of CopySystemImage, CopyAllImages, or
+// CopySpecificImages, to control whether, when the source reference is a list,
+// copy.Image() copies only an image which matches the current runtime
+// environment, or all images which match the supplied reference, or only
+// specific images from the source reference.
+type ImageListSelection int
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+    RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
+    SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest().
+    SignPassphrase   string // Passphrase to use when signing with the key ID from `SignBy`.
+    ReportWriter     io.Writer
+    SourceCtx        *types.SystemContext
+    DestinationCtx   *types.SystemContext
+    ProgressInterval time.Duration                 // time to wait between reports to signal the progress channel
+    Progress         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+
+    // Preserve digests, and fail if we cannot.
+    PreserveDigests bool
+    // Manifest MIME type of the image set by the user. "" is the default and means
+    // use autodetection to determine the manifest MIME type.
+    ForceManifestMIMEType string
+    ImageListSelection    ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
+    Instances             []digest.Digest    // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+
+    // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
+    // The encryption options are derived from the construction of the EncryptConfig object.
+    // Note: During the initial encryption process of a layer, the resultant digest is not known
+    // during creation, so newDigestingReader has to be set with validateDigest = false
+    OciEncryptConfig *encconfig.EncryptConfig
+    // OciEncryptLayers represents the list of layers to encrypt.
+    // If nil, don't encrypt any layers.
+    // If non-nil and len==0, denotes encrypt all layers.
+    // integers in the slice represent 0-indexed layer indices, with support for negative
+    // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
+    OciEncryptLayers *[]int
+    // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an
+    // image if it is encrypted. If nil, it does not attempt to decrypt an image.
+    OciDecryptConfig *encconfig.DecryptConfig
+
+    // A weighted semaphore to limit the amount of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored.
+    ConcurrentBlobCopiesSemaphore *semaphore.Weighted
+
+    // MaxParallelDownloads indicates the maximum layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set.
+    MaxParallelDownloads uint
+
+    // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
+    // exists (and is equivalent), making the eventual (no-op) copy more performant for this case. Enabling the option
+    // is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
+    OptimizeDestinationImageAlreadyExists bool
+
+    // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
+    // to not indicate "nondistributable".
+    DownloadForeignLayers bool
+}
+
+// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
+func validateImageListSelection(selection ImageListSelection) error {
+    switch selection {
+    case CopySystemImage, CopyAllImages, CopySpecificImages:
+        return nil
+    default:
+        return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+    }
+}
+
+// Image copies image from srcRef to destRef, using policyContext to validate
+// source image admissibility. It returns the manifest which was written to
+// the new copy of the image.
+func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
+    // NOTE this function uses an output parameter for the error return value.
+    // Setting this and returning is the ideal way to return an error.
+    //
+    // the defers in this routine will wrap the error return with its own errors
+    // which can be valuable context in the middle of a multi-streamed copy.
+ if options == nil { + options = &Options{} + } + + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + + reportWriter := ioutil.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef)) + } + defer func() { + if err := dest.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (dest: %v)", err) + } + }() + + rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef)) + } + defer func() { + if err := rawSource.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (src: %v)", err) + } + }() + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. + // createProgressBar() will print a single line instead. + progressOutput := reportWriter + if !isTTY(reportWriter) { + progressOutput = ioutil.Discard + } + + c := &copier{ + dest: dest, + rawSource: rawSource, + reportWriter: reportWriter, + progressOutput: progressOutput, + progressInterval: options.ProgressInterval, + progress: options.Progress, + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually + // we might want to add a separate CommonCtx — or would that be too confusing? + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + ociDecryptConfig: options.OciDecryptConfig, + ociEncryptConfig: options.OciEncryptConfig, + downloadForeignLayers: options.DownloadForeignLayers, + } + + // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. + if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { + c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore + if c.concurrentBlobCopiesSemaphore == nil { + max := options.MaxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max)) + } + } else { + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) + if options.ConcurrentBlobCopiesSemaphore != nil { + if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) + } + defer options.ConcurrentBlobCopiesSemaphore.Release(1) + } + } + + if options.DestinationCtx != nil { + // Note that compressionFormat and compressionLevel can be nil. + c.compressionFormat = options.DestinationCtx.CompressionFormat + c.compressionLevel = options.DestinationCtx.CompressionLevel + } + + unparsedToplevel := image.UnparsedInstance(rawSource, nil) + multiImage, err := isMultiImage(ctx, unparsedToplevel) + if err != nil { + return nil, errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef)) + } + + if !multiImage { + // The simple case: just copy a single image. 
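+		// [review sketch] For reviewers: a minimal illustration of how a caller is
+		// expected to drive this entry point; ctx, destRef, srcRef, and policyContext
+		// are assumed to be prepared elsewhere (e.g. via a transport's ParseReference
+		// and signature.NewPolicyContext), so this is a hedged sketch, not part of the
+		// vendored code:
+		//
+		//	manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
+		//		ReportWriter:       os.Stdout,
+		//		ImageListSelection: copy.CopySystemImage, // pick the instance matching this host
+		//	})
+		//	if err != nil {
+		//		return fmt.Errorf("copying image: %w", err)
+		//	}
+		//	_ = manifestBytes // manifest bytes as written to the destination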
+		if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil {
+			return nil, err
+		}
+	} else if options.ImageListSelection == CopySystemImage {
+		// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
+		// matches the current system to copy, and copy it.
+		mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef))
+		}
+		manifestList, err := manifest.ListFromBlob(mfest, manifestType)
+		if err != nil {
+			return nil, errors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef))
+		}
+		instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
+		if err != nil {
+			return nil, errors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef))
+		}
+		logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
+		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
+
+		if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
+			return nil, err
+		}
+	} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
+		// If we were asked to copy multiple images and can't, that's an error.
+		if !supportsMultipleImages(c.dest) {
+			return nil, errors.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+		}
+		// Copy some or all of the images.
+		switch options.ImageListSelection {
+		case CopyAllImages:
+			logrus.Debugf("Source is a manifest list; copying all instances")
+		case CopySpecificImages:
+			logrus.Debugf("Source is a manifest list; copying some instances")
+		}
+		if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
+		return nil, errors.Wrap(err, "committing the finished image")
+	}
+
+	return copiedManifest, nil
+}
+
+// Checks if the destination supports accepting multiple images by checking if it can support
+// manifest types that are lists of other manifests.
+func supportsMultipleImages(dest types.ImageDestination) bool {
+	mtypes := dest.SupportedManifestMIMETypes()
+	if len(mtypes) == 0 {
+		// Anything goes!
+		return true
+	}
+	for _, mtype := range mtypes {
+		if manifest.MIMETypeIsMultiImage(mtype) {
+			return true
+		}
+	}
+	return false
+}
+
+// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
+// (possibly remote) destination). It returns true, along with the destination's manifest, type, and digest, if they
+// compare equal.
+func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { + srcManifest, _, err := src.Manifest(ctx) + if err != nil { + return false, nil, "", "", errors.Wrapf(err, "reading manifest from image") + } + + srcManifestDigest, err := manifest.Digest(srcManifest) + if err != nil { + return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest") + } + + destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx) + if err != nil { + logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err) + return false, nil, "", "", nil + } + + destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err) + return false, nil, "", "", nil + } + + destManifestDigest, err := manifest.Digest(destManifest) + if err != nil { + return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest") + } + + logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest) + if srcManifestDigest != destManifestDigest { + return false, nil, "", "", nil + } + + // Destination and source manifests, types and digests should all be equivalent + return true, destManifest, destManifestType, destManifestDigest, nil +} + +// copyMultipleImages copies some or all of an image list's instances, using +// policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) { + // Parse the list and get a copy of the original value after it's re-encoded. + manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, errors.Wrapf(err, "reading manifest list") + } + originalList, err := manifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing manifest list %q", string(manifestList)) + } + updatedList := originalList.Clone() + + // Read and/or clear the set of signatures for this list. + var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image list signatures\n") + s, err := c.rawSource.GetSignatures(ctx, nil) + if err != nil { + return nil, errors.Wrap(err, "reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image list destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) + } + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. 
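+	// [review sketch] A digested reference pins an exact manifest, e.g.
+	// "example.com/app@sha256:...". How the reference.Digested check below behaves,
+	// as a small illustration (the image name and digest are made up):
+	//
+	//	named, err := reference.ParseNormalizedNamed("example.com/app@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
+	//	if err != nil {
+	//		return nil, err
+	//	}
+	//	if digested, ok := named.(reference.Digested); ok {
+	//		fmt.Println(digested.Digest()) // "sha256:e3b0..."
+	//	}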
+ destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyOneImage. + cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, errors.Wrapf(err, "determining manifest list type to write to destination") + } + if selectedListType != originalList.MIMEType() { + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := updatedList.Instances() + imagesToCopy := len(instanceDigests) + if options.ImageListSelection == CopySpecificImages { + imagesToCopy = len(options.Instances) + } + c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) + updates := make([]manifest.ListUpdate, len(instanceDigests)) + instancesCopied := 0 + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages { + skip := true + for _, instance := range options.Instances { + if instance == instanceDigest { + skip = false + break + } + } + if skip { + update, err := updatedList.Instance(instanceDigest) + if err != nil { + return nil, err + } + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + // Record the digest/size/type of the manifest that we didn't copy. + updates[i] = update + continue + } + } + logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) + updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) + if err != nil { + return nil, err + } + instancesCopied++ + // Record the result of a possible conversion here. 
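+		// [review sketch] For orientation, the manifest.List update flow used in this
+		// loop, reduced to its core (digest, size, and media type are illustrative):
+		//
+		//	list, _ := manifest.ListFromBlob(blob, manifest.DockerV2ListMediaType)
+		//	updated := list.Clone()
+		//	updates := make([]manifest.ListUpdate, len(updated.Instances()))
+		//	for i, d := range updated.Instances() {
+		//		updates[i] = manifest.ListUpdate{Digest: d, Size: 123, MediaType: imgspecv1.MediaTypeImageManifest}
+		//	}
+		//	_ = updated.UpdateInstances(updates)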
+ update := manifest.ListUpdate{ + Digest: updatedManifestDigest, + Size: int64(len(updatedManifest)), + MediaType: updatedManifestType, + } + updates[i] = update + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = updatedList.UpdateInstances(updates); err != nil { + return nil, errors.Wrapf(err, "updating manifest list") + } + + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + attemptedList := updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, errors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. + attemptedManifestList, err := attemptedList.Serialize() + if err != nil { + return nil, errors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances()) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, errors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances()) + } + + // If we can't just use the original value, but we have to change it, flag an error. + if !bytes.Equal(attemptedManifestList, originalManifestList) { + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + } + logrus.Debugf("Manifest list has been updated") + } else { + // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. + attemptedManifestList = manifestList + } + + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue + } + errs = nil + manifestList = attemptedManifestList + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + + // Sign the manifest list. + if options.SignBy != "" { + newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase) + if err != nil { + return nil, err + } + sigs = append(sigs, newSig) + } + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil { + return nil, errors.Wrap(err, "writing signatures") + } + + return manifestList, nil +} + +// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate +// source image admissibility. 
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(ctx, unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return nil, "", "", errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + } + if multiImage { + return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (The multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return nil, "", "", errors.Wrap(err, "Source image rejected") + } + src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) + if err != nil { + return nil, "", "", errors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference())) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. 
+ destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + sourceManifest, _, err := src.Manifest(ctx) + if err != nil { + return nil, "", "", errors.Wrapf(err, "reading manifest from source image") + } + matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) + if err != nil { + return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + manifestList, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, "", "", errors.Wrapf(err, "reading manifest from source image") + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil { + return nil, "", "", err + } + + var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image source signatures\n") + s, err := src.Signatures(ctx) + if err != nil { + return nil, "", "", errors.Wrap(err, "reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) + } + } + + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. + cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + cannotModifyManifestReason: cannotModifyManifestReason, + ociEncryptLayers: options.OciEncryptLayers, + } + // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return nil, "", "", err + } + + destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil + + // We compute preferredManifestMIMEType only to show it in error messages. 
+	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
+	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
+	if err != nil {
+		return nil, "", "", err
+	}
+
+	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
+	ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
+	// If encrypted and decryption keys provided, we should try to decrypt
+	ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
+
+	// If enabled, fetch and compare the destination's manifest and, as an optimization, skip updating the destination iff the two are equal
+	if options.OptimizeDestinationImageAlreadyExists {
+		shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+		noPendingManifestUpdates := ic.noPendingManifestUpdates()
+
+		logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
+		if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
+			isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
+			if err != nil {
+				logrus.Warnf("Failed to compare destination image manifest: %v", err)
+				return nil, "", "", err
+			}
+
+			if isSrcDestManifestEqual {
+				c.Printf("Skipping: image already present at destination\n")
+				return retManifest, retManifestType, retManifestDigest, nil
+			}
+		}
+	}
+
+	if err := ic.copyLayers(ctx); err != nil {
+		return nil, "", "", err
+	}
+
+	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
+	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
+	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
+	// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
+	// we're altering how they're compressed. If the process succeeds, fine…
+	manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+	retManifestType = preferredManifestMIMEType
+	if err != nil {
+		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
+		// … if it fails, and the failure is either because the manifest is rejected by the registry, or
+		// because we failed to create a manifest of the specified type because the specific manifest type
+		// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
+		// have other options available that could still succeed.
+		_, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
+		_, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
+		if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
+			// We don’t have other options.
+			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+ // Don’t bother the user with MIME types if we have no choice. + return nil, "", "", err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if ic.cannotModifyManifestReason != "" { + return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason) + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} + for _, manifestMIMEType := range otherManifestMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + manifestBytes = attemptedManifest + retManifestDigest = attemptedManifestDigest + retManifestType = manifestMIMEType + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + if targetInstance != nil { + targetInstance = &retManifestDigest + } + + if options.SignBy != "" { + newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase) + if err != nil { + return nil, "", "", err + } + sigs = append(sigs, newSig) + } + + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil { + return nil, "", "", errors.Wrap(err, "writing signatures") + } + + return manifestBytes, retManifestType, retManifestDigest, nil +} + +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. +func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { + if dest.MustMatchRuntimeOS() { + c, err := src.OCIConfig(ctx) + if err != nil { + return errors.Wrapf(err, "parsing image configuration") + } + wantedPlatforms, err := platform.WantedPlatforms(sys) + if err != nil { + return errors.Wrapf(err, "getting current platform information %#v", sys) + } + + options := newOrderedSet() + match := false + for _, wantedPlatform := range wantedPlatforms { + // Waiting for https://github.com/opencontainers/image-spec/pull/777 : + // This currently can’t use image.MatchesPlatform because we don’t know what to use + // for image.Variant. 
+ if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture { + match = true + break + } + options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture)) + } + if !match { + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q", + c.OS, c.Architecture, strings.Join(options.list, ", ")) + } + } + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +func (ic *imageCopier) noPendingManifestUpdates() bool { + return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return term.IsTerminal(int(f.Fd())) + } + return false +} + +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". +func (ic *imageCopier) copyLayers(ctx context.Context) error { + srcInfos := ic.src.LayerInfos() + numLayers := len(srcInfos) + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return err + } + srcInfosUpdated := false + // If we only need to check authorization, no updates required. + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // The manifest is used to extract the information whether a given + // layer is empty. + manifestBlob, manifestType, err := ic.src.Manifest(ctx) + if err != nil { + return err + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return err + } + manifestLayerInfos := man.LayerInfos() + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} + if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. 
+ // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. + if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) + } + data[index] = cld + } + + // Create layer Encryption map + encLayerBitmap := map[int]bool{} + var encryptAll bool + if ic.ociEncryptLayers != nil { + encryptAll = len(*ic.ociEncryptLayers) == 0 + totalLayers := len(srcInfos) + for _, l := range *ic.ociEncryptLayers { + // if layer is negative, it is reverse indexed. + encLayerBitmap[(totalLayers+l)%totalLayers] = true + } + + if encryptAll { + for i := 0; i < len(srcInfos); i++ { + encLayerBitmap[i] = true + } + } + } + + if err := func() error { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() + + for i, srcLayer := range srcInfos { + err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1) + if err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying layer: %w", err) + } + copyGroup.Add(1) + go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference()) + } + + // A call to copyGroup.Wait() is done at this point by the defer above. + return nil + }(); err != nil { + return err + } + + destInfos := make([]types.BlobInfo, numLayers) + diffIDs := make([]digest.Digest, numLayers) + for i, cld := range data { + if cld.err != nil { + return cld.err + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + return nil +} + +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + if len(a) != len(b) { + return true + } + for i := range a { + if a[i].Digest != b[i].Digest { + return true + } + } + return false +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. 
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + pendingImage := ic.src + if !ic.noPendingManifestUpdates() { + if ic.cannotModifyManifestReason != "" { + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. + // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. + // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. + return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + } + pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) + if err != nil { + return nil, "", errors.Wrap(err, "creating an updated image manifest") + } + pendingImage = pi + } + man, _, err := pendingImage.Manifest(ctx) + if err != nil { + return nil, "", errors.Wrap(err, "reading manifest") + } + + if err := ic.c.copyConfig(ctx, pendingImage); err != nil { + return nil, "", err + } + + ic.c.Printf("Writing manifest to image destination\n") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", errors.Wrapf(err, "writing manifest") + } + return man, manifestDigest, nil +} + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. +func (c *copier) newProgressPool() *mpb.Progress { + return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) +} + +// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar +func customPartialBlobDecorFunc(s decor.Statistics) string { + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + } + pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) +} + +// createProgressBar creates a mpb.Bar in pool. 
Note that if the copier's reportWriter
+// is ioutil.Discard, the progress bar's output will be discarded.
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// onComplete will replace prefix once the bar/spinner has completed
+	onComplete = prefix + " " + onComplete
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		if partial {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.Any(customPartialBlobDecorFunc),
+				),
+			)
+		} else {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+				),
+			)
+		}
+	} else {
+		bar = pool.New(0,
+			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+			mpb.BarFillerClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.OnComplete(decor.Name(prefix), onComplete),
+			),
+		)
+	}
+	if c.progressOutput == ioutil.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+	return bar
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+	srcInfo := src.ConfigInfo()
+	if srcInfo.Digest != "" {
+		if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+			// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+			return fmt.Errorf("copying config: %w", err)
+		}
+		defer c.concurrentBlobCopiesSemaphore.Release(1)
+
+		configBlob, err := src.ConfigBlob(ctx)
+		if err != nil {
+			return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
+		}
+
+		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+			progressPool := c.newProgressPool()
+			defer progressPool.Wait()
+			bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+			defer bar.Abort(false)
+
+			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
+			bar.SetTotal(int64(len(configBlob)), true)
+			return destInfo, nil
+		}()
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+	digest digest.Digest
+	err    error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
+// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused, but it can be nil.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
+	// If the srcInfo doesn't contain compression information, try to compute it from the
+	// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
+	// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
+	// the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
+	// which uses the compression information to compute the updated MediaType values.
+	// (Sadly UpdatedImage() is documented to not update MediaTypes from
+	// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+	//
+	// This MIME type → compression mapping belongs in manifest-specific code in our manifest
+	// package (but we should preferably replace/change UpdatedImage instead of productizing
+	// this workaround).
+	if srcInfo.CompressionAlgorithm == nil {
+		switch srcInfo.MediaType {
+		case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
+			srcInfo.CompressionAlgorithm = &compression.Gzip
+		case imgspecv1.MediaTypeImageLayerZstd:
+			srcInfo.CompressionAlgorithm = &compression.Zstd
+		}
+	}
+
+	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+	// Diffs are needed if we are encrypting an image or trying to decrypt an image
+	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
+
+	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+	if !diffIDIsNeeded {
+		// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
+		// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
+		// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
+		// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
+		// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
+		// the ImageDestination interface lets us pass in.
+		var (
+			blobInfo types.BlobInfo
+			reused   bool
+			err      error
+		)
+		// Note: the storage destination optimizes the committing of
+		// layers which requires passing the index of the layer.
+		// Hence, we need to special case and cast.
+		dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions)
+		if ok {
+			options := internalTypes.TryReusingBlobOptions{
+				Cache:         ic.c.blobInfoCache,
+				CanSubstitute: ic.canSubstituteBlobs,
+				SrcRef:        srcRef,
+				EmptyLayer:    emptyLayer,
+			}
+			options.LayerIndex = &layerIndex
+			reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
+		} else {
+			reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
+		}
+
+		if err != nil {
+			return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
+		}
+		if reused {
+			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+			func() { // A scope for defer
+				bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
+				defer bar.Abort(false)
+				bar.SetTotal(0, true)
+			}()
+
+			// Throw an event that the layer has been skipped
+			if ic.c.progress != nil && ic.c.progressInterval > 0 {
+				ic.c.progress <- types.ProgressProperties{
+					Event:    types.ProgressEventSkipped,
+					Artifact: srcInfo,
+				}
+			}
+
+			// If the reused blob has the same digest as the one we asked for, but
+			// the transport didn't/couldn't supply compression info, fill it in based
+			// on what we know from the srcInfos we were given.
+			// If the srcInfos came from LayerInfosForCopy(), then UpdatedImage() will
+			// call UpdateLayerInfos(), which uses this information to compute the
+			// MediaType value for the updated layer infos, and if the transport
+			// didn't pass the information along from its input to its output, then
+			// it can derive the MediaType incorrectly.
+			if blobInfo.Digest == srcInfo.Digest && blobInfo.CompressionAlgorithm == nil {
+				blobInfo.CompressionOperation = srcInfo.CompressionOperation
+				blobInfo.CompressionAlgorithm = srcInfo.CompressionAlgorithm
+			}
+			return blobInfo, cachedDiffID, nil
+		}
+	}
+
+	// A partial pull is managed by the destination storage, which decides what portions
+	// of the source file are not known yet and must be fetched.
+	// Attempt a partial pull only when the source allows retrieving a blob partially and
+	// the destination supports it.
+	imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
+	imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
+	if okSource && okDest && !diffIDIsNeeded {
+		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
+			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+			hideProgressBar := true
+			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+				bar.Abort(hideProgressBar)
+			}()
+
+			progress := make(chan int64)
+			terminate := make(chan interface{})
+
+			defer close(terminate)
+			defer close(progress)
+
+			proxy := imageSourceSeekableProxy{
+				source:   imgSource,
+				progress: progress,
+			}
+			go func() {
+				for {
+					select {
+					case written := <-progress:
+						bar.IncrInt64(written)
+					case <-terminate:
+						return
+					}
+				}
+			}()
+
+			bar.SetTotal(srcInfo.Size, false)
+			info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
+			if err == nil {
+				bar.SetRefill(srcInfo.Size - bar.Current())
+				bar.SetCurrent(srcInfo.Size)
+				bar.SetTotal(srcInfo.Size, true)
+				hideProgressBar = false
+				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
+				return true, info
+			}
+			logrus.Debugf("Failed to retrieve partial blob: %v", err)
+			return false, types.BlobInfo{}
+		}(); reused {
+			return blobInfo, cachedDiffID, nil
+		}
+	}
+
+	// Fallback: copy the layer, computing the diffID if we need to do so
+	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
+	if err != nil {
+		return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
+	}
+	defer srcStream.Close()
+
+	return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
+		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+		defer bar.Abort(false)
+
+		blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
+		if err != nil {
+			return types.BlobInfo{}, "", err
+		}
+
+		diffID := cachedDiffID
+		if diffIDIsNeeded {
+			select {
+			case <-ctx.Done():
+				return types.BlobInfo{}, "", ctx.Err()
+			case diffIDResult := <-diffIDChan:
+				if diffIDResult.err != nil {
+					return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
+				}
+				logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+				// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+				// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+				ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+				diffID = diffIDResult.digest
+			}
+		}
+
+		bar.SetTotal(srcInfo.Size, true)
+		return blobInfo, diffID, nil
+	}()
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+	var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+	var diffIDChan chan diffIDResult
+
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+	if diffIDIsNeeded {
+		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+		pipeReader, pipeWriter := io.Pipe()
+		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+			_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+		}()
+
+		getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+			// reading from the pipe has failed, we don’t really care.
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+			return pipeWriter
+		}
+	}
+
+	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+	return blobInfo, diffIDChan, err
+	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
+	result := diffIDResult{
+		digest: "",
+		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+	}
+	defer func() { dest <- result }()
+	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+	result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
+	if decompressor != nil {
+		s, err := decompressor(stream)
+		if err != nil {
+			return "", err
+		}
+		defer s.Close()
+		stream = s
+	}
+
+	return digest.Canonical.FromReader(stream)
+}
+
+// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during read.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+	reader io.Reader
+}
+
+// Read annotates any error, other than io.EOF, that happens during read
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+	n, err = r.reader.Read(b)
+	if err != io.EOF {
+		return n, errors.Wrapf(err, "happened during read")
+	}
+	return n, err
+}
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps (de/re/)compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, + canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + if isConfig { // This is guaranteed by the caller, but set it here to be explicit. + canModifyBlob = false + } + + // The copying happens through a pipeline of connected io.Readers. + // === Input: srcStream + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest) + } + var destStream io.Reader = digestingReader + + // === Decrypt the stream, if required. + var decrypted bool + if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { + newDesc := imgspecv1.Descriptor{ + Annotations: srcInfo.Annotations, + } + + var d digest.Digest + destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest) + } + + srcInfo.Digest = d + srcInfo.Size = -1 + for k := range srcInfo.Annotations { + if strings.HasPrefix(k, "org.opencontainers.image.enc") { + delete(srcInfo.Annotations, k) + } + } + decrypted = true + } + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest) + } + isCompressed := decompressor != nil + if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) + } + + // === Update progress bars + destStream = bar.ProxyReader(destStream) + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. 
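+ // (When getOriginalLayerCopyWriter is set, the io.TeeReader below mirrors every
+ // byte that the destination reads from destStream into that writer, so the
+ // original bytes are hashed for the DiffID as a side effect of the upload read.)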
+ if getOriginalLayerCopyWriter != nil {
+ destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
+ originalLayerReader = destStream
+ }
+
+ compressionMetadata := map[string]string{}
+ // === Deal with layer compression/decompression if necessary
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ var inputInfo types.BlobInfo
+ var compressionOperation types.LayerCompression
+ var uploadCompressionFormat *compressiontypes.Algorithm
+ srcCompressorName := internalblobinfocache.Uncompressed
+ if isCompressed {
+ srcCompressorName = compressionFormat.Name()
+ }
+ var uploadCompressorName string
+ if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
+ // Preserve the original: compression cannot be applied to an encrypted blob unless it is decrypted first
+ logrus.Debugf("Using original blob without modification for encrypted blob")
+ compressionOperation = types.PreserveOriginal
+ inputInfo = srcInfo
+ srcCompressorName = internalblobinfocache.UnknownCompression
+ uploadCompressionFormat = nil
+ uploadCompressorName = internalblobinfocache.UnknownCompression
+ } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
+ logrus.Debugf("Compressing blob on the fly")
+ compressionOperation = types.Compress
+ pipeReader, pipeWriter := io.Pipe()
+ defer pipeReader.Close()
+
+ if c.compressionFormat != nil {
+ uploadCompressionFormat = c.compressionFormat
+ } else {
+ uploadCompressionFormat = defaultCompressionFormat
+ }
+ // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+ // we don’t care.
+ go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
+ destStream = pipeReader
+ inputInfo.Digest = ""
+ inputInfo.Size = -1
+ uploadCompressorName = uploadCompressionFormat.Name()
+ } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed &&
+ c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() {
+ // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
+ // re-compressed using the desired format.
+ logrus.Debugf("Blob will be converted") + + compressionOperation = types.PreserveOriginal + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + uploadCompressionFormat = c.compressionFormat + go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter + + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + uploadCompressorName = uploadCompressionFormat.Name() + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { + logrus.Debugf("Blob will be decompressed") + compressionOperation = types.Decompress + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + destStream = s + inputInfo.Digest = "" + inputInfo.Size = -1 + uploadCompressionFormat = nil + uploadCompressorName = internalblobinfocache.Uncompressed + } else { + // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. + logrus.Debugf("Using original blob without modification") + compressionOperation = types.PreserveOriginal + inputInfo = srcInfo + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + if isCompressed { + uploadCompressionFormat = &compressionFormat + } else { + uploadCompressionFormat = nil + } + uploadCompressorName = srcCompressorName + } + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + var ( + encrypted bool + finalizer ocicrypt.EncryptLayerFinalizer + ) + if toEncrypt { + if decrypted { + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + + if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { + var annotations map[string]string + if !decrypted { + annotations = srcInfo.Annotations + } + desc := imgspecv1.Descriptor{ + MediaType: srcInfo.MediaType, + Digest: srcInfo.Digest, + Size: srcInfo.Size, + Annotations: annotations, + } + + s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest) + } + + destStream = s + finalizer = fin + inputInfo.Digest = "" + inputInfo.Size = -1 + encrypted = true + } + } + + // === Report progress using the c.progress channel, if required. + if c.progress != nil && c.progressInterval > 0 { + progressReader := newProgressReader( + destStream, + c.progress, + c.progressInterval, + srcInfo, + ) + defer progressReader.reportDone() + destStream = progressReader + } + + // === Finally, send the layer stream to dest. + var uploadedInfo types.BlobInfo + // Note: the storage destination optimizes the committing of layers + // which requires passing the index of the layer. Hence, we need to + // special case and cast. 
+ dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
+ if ok {
+ options := internalTypes.PutBlobOptions{
+ Cache: c.blobInfoCache,
+ IsConfig: isConfig,
+ EmptyLayer: emptyLayer,
+ }
+ if !isConfig {
+ options.LayerIndex = &layerIndex
+ }
+ uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
+ } else {
+ uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
+ }
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "writing blob")
+ }
+
+ uploadedInfo.Annotations = srcInfo.Annotations
+
+ uploadedInfo.CompressionOperation = compressionOperation
+ // If we can modify the layer's blob, record the desired compression algorithm so that it can be set in the manifest.
+ uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
+ if decrypted {
+ uploadedInfo.CryptoOperation = types.Decrypt
+ } else if encrypted {
+ encryptAnnotations, err := finalizer()
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
+ }
+ uploadedInfo.CryptoOperation = types.Encrypt
+ if uploadedInfo.Annotations == nil {
+ uploadedInfo.Annotations = map[string]string{}
+ }
+ for k, v := range encryptAnnotations {
+ uploadedInfo.Annotations[k] = v
+ }
+ }
+
+ // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+ // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+ // So, read everything from originalLayerReader, which will cause the rest to be
+ // sent there if we are not already at EOF.
+ if getOriginalLayerCopyWriter != nil {
+ logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+ _, err := io.Copy(ioutil.Discard, originalLayerReader)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
+ }
+ }
+
+ if digestingReader.validationFailed { // Coverage: This should never happen.
+ return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ }
+ if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+ return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+ }
+ if digestingReader.validationSucceeded {
+ // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+ // (because inputInfo.Digest == "", this must have been computed afresh).
+ switch compressionOperation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
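+ // The RecordDigestUncompressedPair calls below teach the blob info cache that
+ // srcInfo.Digest and uploadedInfo.Digest are the uncompressed/compressed forms
+ // of the same layer, so later copies can substitute one for the other in TryReusingBlob.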
+ case types.Compress: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + case types.Decompress: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + default: + return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + } + if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) + } + if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName) + } + } + + // Copy all the metadata generated by the compressor into the annotations. + if uploadedInfo.Annotations == nil { + uploadedInfo.Annotations = map[string]string{} + } + for k, v := range compressionMetadata { + uploadedInfo.Annotations[k] = v + } + + return uploadedInfo, nil +} + +// doCompression reads all input from src and writes its compressed equivalent to dest. +func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error { + compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) + if err != nil { + return err + } + + buf := make([]byte, compressionBufferSize) + + _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() + if err != nil { + compressor.Close() + return err + } + + return compressor.Close() +} + +// compressGoroutine reads all input from src and writes its compressed equivalent to dest. +func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { + err := errors.New("Internal error: unexpected panic in compressGoroutine") + defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. + _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil + }() + + err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel) +} diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go new file mode 100644 index 00000000000..ccc9110ff90 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -0,0 +1,62 @@ +package copy + +import ( + "hash" + "io" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + hash hash.Hash + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. +// (neither is set if EOF is never reached). 
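+//
+// A usage sketch (names invented for illustration):
+//
+//	r, err := newDigestingReader(blobStream, expectedDigest)
+//	if err != nil {
+//		return err // malformed or unsupported expectedDigest
+//	}
+//	if _, err := io.Copy(ioutil.Discard, r); err != nil {
+//		return err // I/O failure, or a digest mismatch reported at EOF
+//	}
+//	// r.validationSucceeded is now true: the stream matched expectedDigest.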
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { + var digester digest.Digester + if err := expectedDigest.Validate(); err != nil { + return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) + } + digestAlgorithm := expectedDigest.Algorithm() + if !digestAlgorithm.Available() { + return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + } + digester = digestAlgorithm.Digester() + + return &digestingReader{ + source: source, + digester: digester, + hash: digester.Hash(), + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digest.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n) + } + } + if err == io.EOF { + actualDigest := d.digester.Digest() + if actualDigest != d.expectedDigest { + d.validationFailed = true + return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) + } + d.validationSucceeded = true + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go new file mode 100644 index 00000000000..a18d6f1518f --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/encrypt.go @@ -0,0 +1,24 @@ +package copy + +import ( + "strings" + + "github.com/containers/image/v5/types" +) + +// isOciEncrypted returns a bool indicating if a mediatype is encrypted +// This function will be moved to be part of OCI spec when adopted. +func isOciEncrypted(mediatype string) bool { + return strings.HasSuffix(mediatype, "+encrypted") +} + +// isEncrypted checks if an image is encrypted +func isEncrypted(i types.Image) bool { + layers := i.LayerInfos() + for _, l := range layers { + if isOciEncrypted(l.MediaType) { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go new file mode 100644 index 00000000000..86ec8863aea --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -0,0 +1,170 @@ +package copy + +import ( + "context" + "strings" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. +// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. +// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. +var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} + +// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. +type orderedSet struct { + list []string + included map[string]struct{} +} + +// newOrderedSet creates a correctly initialized orderedSet. 
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+ return &orderedSet{
+ list: []string{},
+ included: map[string]struct{}{},
+ }
+}
+
+// append adds s to the end of os, only if it is not included already.
+func (os *orderedSet) append(s string) {
+ if _, ok := os.included[s]; !ok {
+ os.list = append(os.list, s)
+ os.included[s] = struct{}{}
+ }
+}
+
+// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and the manifest may be modified (ic.cannotModifyManifestReason == "").
+// Note that the conversion will only happen later, through ic.src.UpdatedImage.
+// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
+// and a list of other possible alternatives, in order.
+func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
+ _, srcType, err := ic.src.Manifest(ctx)
+ if err != nil { // This should have been cached?!
+ return "", nil, errors.Wrap(err, "reading manifest")
+ }
+ normalizedSrcType := manifest.NormalizedMIMEType(srcType)
+ if srcType != normalizedSrcType {
+ logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
+ srcType = normalizedSrcType
+ }
+
+ if forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
+ }
+
+ if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
+ return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
+ }
+ supportedByDest := map[string]struct{}{}
+ for _, t := range destSupportedManifestMIMETypes {
+ if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
+ supportedByDest[t] = struct{}{}
+ }
+ }
+
+ // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
+ // So, build a list of types to try in order of decreasing preference.
+ // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
+ // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+ // and never attempt the other one.
+ prioritizedTypes := newOrderedSet()
+
+ // First of all, prefer to keep the original manifest unmodified.
+ if _, ok := supportedByDest[srcType]; ok {
+ prioritizedTypes.append(srcType)
+ }
+ if ic.cannotModifyManifestReason != "" {
+ // We could also drop this check and have the caller
+ // make the choice; it is already doing that to an extent, to improve error
+ // messages. But it is nice to hide the “if we can't modify, do no conversion”
+ // special case in here; the caller can then worry (or not) only about a good UI.
+ logrus.Debugf("We can't modify the manifest, hoping for the best...")
+ return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
+ }
+
+ // Then use our list of preferred types.
+ for _, t := range preferredManifestMIMETypes {
+ if _, ok := supportedByDest[t]; ok {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ // Finally, try anything else the destination supports.
+ for _, t := range destSupportedManifestMIMETypes {
+ prioritizedTypes.append(t)
+ }
+
+ logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+ if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
+ return "", nil, errors.New("Internal error: no candidate MIME types")
+ }
+ preferredType := prioritizedTypes.list[0]
+ if preferredType != srcType {
+ ic.manifestUpdates.ManifestMIMEType = preferredType
+ } else {
+ logrus.Debugf("... will first try using the original manifest unmodified")
+ }
+ return preferredType, prioritizedTypes.list[1:], nil
+}
+
+// isMultiImage returns true if img is a list of images
+func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
+ _, mt, err := img.Manifest(ctx)
+ if err != nil {
+ return false, err
+ }
+ return manifest.MIMETypeIsMultiImage(mt), nil
+}
+
+// determineListConversion takes the current MIME type of a list of manifests,
+// the list of MIME types supported for a given destination, and a possible
+// forced value, and returns the MIME type to which we should convert the list
+// of manifests (regardless of whether we are converting to it or using it
+// unmodified) and a slice of other list types which might be supported by the
+// destination.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
+ // If there's no list of supported types, then anything we support is expected to be supported.
+ if len(destSupportedMIMETypes) == 0 {
+ destSupportedMIMETypes = manifest.SupportedListMIMETypes
+ }
+ // If we're forcing it, replace the list of supported types with the forced value.
+ if forcedListMIMEType != "" {
+ destSupportedMIMETypes = []string{forcedListMIMEType}
+ }
+
+ prioritizedTypes := newOrderedSet()
+ // The first priority is the current type, if it's in the list, since that lets us avoid a
+ // conversion that isn't strictly necessary.
+ for _, t := range destSupportedMIMETypes {
+ if t == currentListMIMEType {
+ prioritizedTypes.append(currentListMIMEType)
+ break
+ }
+ }
+ // Pick out the other list types that we support.
+ for _, t := range destSupportedMIMETypes {
+ if manifest.MIMETypeIsMultiImage(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(prioritizedTypes.list, ", "))
+ if len(prioritizedTypes.list) == 0 {
+ return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+ }
+ selectedType := prioritizedTypes.list[0]
+ otherSupportedTypes := prioritizedTypes.list[1:]
+ if selectedType != currentListMIMEType {
+ logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
+ } else {
+ logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes)
+ }
+ // Done.
+ return selectedType, otherSupportedTypes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go
new file mode 100644
index 00000000000..42f490d326a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go
@@ -0,0 +1,104 @@
+package copy
+
+import (
+ "context"
+ "io"
+ "time"
+
+ internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/types"
+)
+
+// progressReader is a reader that reports its progress on an interval.
+type progressReader struct {
+ source io.Reader
+ channel chan<- types.ProgressProperties
+ interval time.Duration
+ artifact types.BlobInfo
+ lastUpdate time.Time
+ offset uint64
+ offsetUpdate uint64
+}
+
+// newProgressReader creates a new progress reader for:
+// `source`: The source to internally read bytes from
+// `channel`: The reporter channel to which the progress will be sent
+// `interval`: The update interval to indicate how often the progress should update
+// `artifact`: The metadata of the blob currently being transferred
+func newProgressReader(
+ source io.Reader,
+ channel chan<- types.ProgressProperties,
+ interval time.Duration,
+ artifact types.BlobInfo,
+) *progressReader {
+ // The progress reader constructor informs the progress channel
+ // that a new artifact will be read
+ channel <- types.ProgressProperties{
+ Event: types.ProgressEventNewArtifact,
+ Artifact: artifact,
+ }
+ return &progressReader{
+ source: source,
+ channel: channel,
+ interval: interval,
+ artifact: artifact,
+ lastUpdate: time.Now(),
+ offset: 0,
+ offsetUpdate: 0,
+ }
+}
+
+// reportDone reports on the internal channel that the transfer has
+// finished
+func (r *progressReader) reportDone() {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventDone,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+}
+
+// Read continuously reads bytes into the progress reader and reports the
+// status via the internal channel
+func (r *progressReader) Read(p []byte) (int, error) {
+ n, err := r.source.Read(p)
+ r.offset += uint64(n)
+ r.offsetUpdate += uint64(n)
+
+ // Fire a progress update if the provided interval has elapsed
+ if time.Since(r.lastUpdate) > r.interval {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventRead,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+ r.lastUpdate = time.Now()
+ r.offsetUpdate = 0
+ }
+ return n, err
+}
+
+// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes
+// are received.
+type imageSourceSeekableProxy struct {
+ // source is the seekable input to read from.
+ source internalTypes.ImageSourceSeekable
+ // progress is the chan where the total number of bytes read so far are reported.
+ progress chan int64
+}
+
+// GetBlobAt reads from the ImageSourceSeekable and reports how many bytes were received
+// to the progress chan.
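+// On success it optimistically reports the sum of the requested chunk lengths,
+// before the returned chunks have actually been consumed by the caller.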
+func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.progress <- total + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go new file mode 100644 index 00000000000..21a3facd72a --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -0,0 +1,31 @@ +package copy + +import ( + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/pkg/errors" +) + +// createSignature creates a new signature of manifest using keyIdentity. +func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string) ([]byte, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, errors.Wrap(err, "initializing GPG") + } + defer mech.Close() + if err := mech.SupportsSigning(); err != nil { + return nil, errors.Wrap(err, "Signing not supported") + } + + dockerReference := c.dest.Reference().DockerReference() + if dockerReference == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + + c.Printf("Signing manifest\n") + newSig, err := signature.SignDockerManifestWithOptions(manifest, dockerReference.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) + if err != nil { + return nil, errors.Wrap(err, "creating signature") + } + return newSig, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go new file mode 100644 index 00000000000..ea20e7c5e41 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -0,0 +1,295 @@ +package directory + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const version = "Directory Transport Version: 1.1\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + +type dirImageDestination struct { + ref dirReference + desiredLayerCompression types.LayerCompression +} + +// newImageDestination returns an ImageDestination for writing to a directory. 
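+// The directory will hold at most one image: a "version" marker file, the
+// manifest as "manifest.json", one file per layer blob named by digest, and any
+// signatures (see the path helpers on dirReference in directory_transport.go).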
+func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
+ desiredLayerCompression := types.PreserveOriginal
+ if sys != nil {
+ if sys.DirForceCompress {
+ desiredLayerCompression = types.Compress
+
+ if sys.DirForceDecompress {
+ return nil, errors.Errorf("Cannot compress and decompress at the same time")
+ }
+ }
+ if sys.DirForceDecompress {
+ desiredLayerCompression = types.Decompress
+ }
+ }
+ d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
+
+ // If the directory exists, check whether it is empty.
+ // If it is not empty, verify that its contents match those of a container image directory and overwrite them;
+ // if the contents don't match, return an error.
+ dirExists, err := pathExists(d.ref.resolvedPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath)
+ }
+ if dirExists {
+ isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isEmpty {
+ versionExists, err := pathExists(d.ref.versionPath())
+ if err != nil {
+ return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
+ }
+ if versionExists {
+ contents, err := ioutil.ReadFile(d.ref.versionPath())
+ if err != nil {
+ return nil, err
+ }
+ // check whether the contents of the version file are what we expect
+ if string(contents) != version {
+ return nil, ErrNotContainerImageDir
+ }
+ } else {
+ return nil, ErrNotContainerImageDir
+ }
+ // delete directory contents so that only one image is in the directory at a time
+ if err = removeDirContents(d.ref.resolvedPath); err != nil {
+ return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath)
+ }
+ logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+ }
+ } else {
+ // create directory if it doesn't exist
+ if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
+ return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+ }
+ }
+ // create version file
+ err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+ if err != nil {
+ return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
+ }
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+ return nil
+}
+
+func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
+ return nil
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
+ return nil
+}
+
+func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.desiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *dirImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dirImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") + if err != nil { + return types.BlobInfo{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + blobFile.Close() + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return types.BlobInfo{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. 
+ if runtime.GOOS != "windows" {
+ if err := blobFile.Chmod(0644); err != nil {
+ return types.BlobInfo{}, err
+ }
+ }
+
+ blobPath := d.ref.layerPath(blobDigest)
+ // need to explicitly close the file, since a rename won't otherwise work on Windows
+ blobFile.Close()
+ explicitClosed = true
+ if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+ return types.BlobInfo{}, err
+ }
+ succeeded = true
+ return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+ }
+ blobPath := d.ref.layerPath(info.Digest)
+ finfo, err := os.Stat(blobPath)
+ if err != nil && os.IsNotExist(err) {
+ return false, types.BlobInfo{}, nil
+ }
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
+ return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
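+// Signatures are stored as "signature-1", "signature-2", … (prefixed with the
+// instance digest when instanceDigest is not nil), matching dirReference.signaturePath.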
+func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + for i, sig := range signatures { + if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { + return err + } + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error { + return nil +} + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := ioutil.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go new file mode 100644 index 00000000000..ad9129d4012 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -0,0 +1,96 @@ +package directory + +import ( + "context" + "io" + "io/ioutil" + "os" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +type dirImageSource struct { + ref dirReference +} + +// newImageSource returns an ImageSource reading from an existing directory. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ref dirReference) types.ImageSource { + return &dirImageSource{ref} +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *dirImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *dirImageSource) Close() error { + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
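+// With a non-nil instanceDigest this reads "<encoded digest>.manifest.json"; otherwise "manifest.json".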
+func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) + if err != nil { + return nil, "", err + } + return m, manifest.GuessMIMEType(m), err +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dirImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + r, err := os.Open(s.ref.layerPath(info.Digest)) + if err != nil { + return nil, -1, err + } + fi, err := r.Stat() + if err != nil { + return nil, -1, err + } + return r, fi.Size(), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + signatures := [][]byte{} + for i := 0; ; i++ { + signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest)) + if err != nil { + if os.IsNotExist(err) { + break + } + return nil, err + } + signatures = append(signatures, signature) + } + return signatures, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go new file mode 100644 index 00000000000..e542d888c22 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -0,0 +1,189 @@ +package directory + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for directory paths. 
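+// A reference within this transport is simply a filesystem path: for example,
+// "dir:/tmp/img" refers to the image laid out in the directory /tmp/img.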
+var Transport = dirTransport{} + +type dirTransport struct{} + +func (t dirTransport) Name() string { + return "dir" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// dirReference is an ImageReference for directory paths. +type dirReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + path string // As specified by the user. May be relative, contain symlinks, etc. + resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no directory.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// dirTransport.ParseReference; callers who intentionally deal with directories +// can use directory.NewReference. + +// NewReference returns a directory reference for a specified path. +// +// We do not expose an API supplying the resolvedPath; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedPath. +func NewReference(path string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) + if err != nil { + return nil, err + } + return dirReference{path: path, resolvedPath: resolved}, nil +} + +func (ref dirReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dirReference) StringWithinTransport() string { + return ref.path +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dirReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dirReference) PolicyConfigurationIdentity() string { + return ref.resolvedPath +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dirReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedPath + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by dirTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src := newImageSource(ref) + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ref), nil +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.Errorf("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+ }
+ return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+ // FIXME: Should we keep the digest identification?
+ return filepath.Join(ref.path, digest.Encoded())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+ }
+ return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+ return filepath.Join(ref.path, "version")
+}
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
new file mode 100644
index 00000000000..71136b88089
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -0,0 +1,56 @@
+package explicitfilepath
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+ switch _, err := os.Lstat(path); {
+ case err == nil:
+ return resolveExistingPathToFullyExplicit(path)
+ case os.IsNotExist(err):
+ parent, file := filepath.Split(path)
+ resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+ if err != nil {
+ return "", err
+ }
+ if file == "." || file == ".." {
+ // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
+ // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
+ // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
+ // in the resulting path, and especially not at the end.
+ return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+ }
+ resolvedPath := filepath.Join(resolvedParent, file)
+ // As a sanity check, ensure that there are no "." or ".." components.
+ cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. + return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go new file mode 100644 index 00000000000..d4248db21f0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -0,0 +1,81 @@ +package archive + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +type archiveImageDestination struct { + *tarfile.Destination // Implements most of types.ImageDestination + ref archiveReference + archive *tarfile.Writer // Should only be closed if writer != nil + writer io.Closer // May be nil if the archive is shared +} + +func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } + + var archive *tarfile.Writer + var writer io.Closer + if ref.archiveWriter != nil { + archive = ref.archiveWriter + writer = nil + } else { + fh, err := openArchiveForWriting(ref.path) + if err != nil { + return nil, err + } + + archive = tarfile.NewWriter(fh) + writer = fh + } + tarDest := tarfile.NewDestination(sys, archive, ref.ref) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + return &archiveImageDestination{ + Destination: tarDest, + ref: ref, + archive: archive, + writer: writer, + }, nil +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Decompress +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *archiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *archiveImageDestination) Close() error { + if d.writer != nil { + return d.writer.Close() + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. 
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if d.writer != nil { + return d.archive.Close() + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go new file mode 100644 index 00000000000..4bb519a2622 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go @@ -0,0 +1,120 @@ +package archive + +import ( + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// Reader manages a single Docker archive, allows listing its contents and accessing +// individual images with less overhead than creating image references individually +// (because the archive is, if necessary, copied or decompressed only once). +type Reader struct { + path string // The original, user-specified path; not the maintained temporary file, if any + archive *tarfile.Reader +} + +// NewReader returns a Reader for path. +// The caller should call .Close() on the returned object. +func NewReader(sys *types.SystemContext, path string) (*Reader, error) { + archive, err := tarfile.NewReaderFromFile(sys, path) + if err != nil { + return nil, err + } + return &Reader{ + path: path, + archive: archive, + }, nil +} + +// Close deletes temporary files associated with the Reader, if any. +func (r *Reader) Close() error { + return r.archive.Close() +} + +// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport, +// and a variant of imageReference that points at the same image within the reader. +// The caller should call .Close() on the returned Reader. +func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) { + standalone, ok := ref.(archiveReference) + if !ok { + return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) + } + if standalone.archiveReader != nil { + return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport()) + } + reader, err := NewReader(sys, standalone.path) + if err != nil { + return nil, nil, err + } + succeeded := false + defer func() { + if !succeeded { + reader.Close() + } + }() + readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil) + if err != nil { + return nil, nil, err + } + succeeded = true + return reader, readerRef, nil +} + +// List returns a set of references for images in the Reader, +// grouped by the image the references point to.
+// The references are valid only until the Reader is closed. +func (r *Reader) List() ([][]types.ImageReference, error) { + res := [][]types.ImageReference{} + for imageIndex, image := range r.archive.Manifest { + refs := []types.ImageReference{} + for _, tag := range image.RepoTags { + parsedTag, err := reference.ParseNormalizedNamed(tag) + if err != nil { + return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex) + } + nt, ok := parsedTag.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String()) + } + ref, err := newReference(r.path, nt, -1, r.archive, nil) + if err != nil { + return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex) + } + refs = append(refs, ref) + } + if len(refs) == 0 { + ref, err := newReference(r.path, nil, imageIndex, r.archive, nil) + if err != nil { + return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex) + } + refs = append(refs, ref) + } + res = append(res, refs) + } + return res, nil +} + +// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings +// (i.e. exposing the short names before normalization). +// The function reports an error if ref does not identify a single image. +// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned; +// If ref contains a source index, or neither a NamedTagged nor a source index, all tags +// matching the image are returned. +// Almost all users should use List() or ImageReference.DockerReference() instead. +func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) { + archiveRef, ok := ref.(archiveReference) + if !ok { + return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) + } + manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex) + if err != nil { + return nil, err + } + if tagIndex != -1 { + return []string{manifestItem.RepoTags[tagIndex]}, nil + } + return manifestItem.RepoTags, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go new file mode 100644 index 00000000000..7acca210ef1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/src.go @@ -0,0 +1,42 @@ +package archive + +import ( + "context" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/types" +) + +type archiveImageSource struct { + *tarfile.Source // Implements most of types.ImageSource + ref archiveReference +} + +// newImageSource returns a types.ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. 
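+// A minimal caller sketch via the exported API (illustrative; the tar path and the ctx variable are assumptions, and error handling is elided): +// +//	ref, _ := ParseReference("/tmp/img.tar:busybox:latest") +//	src, _ := ref.NewImageSource(ctx, nil) +//	defer src.Close()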
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) { + var archive *tarfile.Reader + var closeArchive bool + if ref.archiveReader != nil { + archive = ref.archiveReader + closeArchive = false + } else { + a, err := tarfile.NewReaderFromFile(sys, ref.path) + if err != nil { + return nil, err + } + archive = a + closeArchive = true + } + src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex) + return &archiveImageSource{ + Source: src, + ref: ref, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *archiveImageSource) Reference() types.ImageReference { + return s.ref +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go new file mode 100644 index 00000000000..9a48cb46cc4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -0,0 +1,211 @@ +package archive + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + ctrImage "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for local Docker archives. +var Transport = archiveTransport{} + +type archiveTransport struct{} + +func (t archiveTransport) Name() string { + return "docker-archive" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in archiveReference.PolicyConfigurationIdentity. + return errors.New(`docker-archive: does not support any scopes except the default "" one`) +} + +// archiveReference is an ImageReference for Docker images. +type archiveReference struct { + path string + // May be nil to read the only image in an archive, or to create an untagged image. + ref reference.NamedTagged + // If not -1, a zero-based index of the image in the manifest. Valid only for sources. + // Must not be set if ref is set. + sourceIndex int + // If not nil, must have been created from path (but archiveReader.path may point at a temporary + // file, not necessarily path precisely). + archiveReader *tarfile.Reader + // If not nil, must have been created for path + archiveWriter *tarfile.Writer +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
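+// Accepted forms (illustrative): +// +//	/path/archive.tar                  (the only image in the archive) +//	/path/archive.tar:busybox:latest   (a specific repository:tag) +//	/path/archive.tar:@0               (a zero-based manifest index)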
+func ParseReference(refString string) (types.ImageReference, error) { + if refString == "" { + return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString) + } + + parts := strings.SplitN(refString, ":", 2) + path := parts[0] + var nt reference.NamedTagged + sourceIndex := -1 + + if len(parts) == 2 { + // A :tag or :@index was specified. + if len(parts[1]) > 0 && parts[1][0] == '@' { + i, err := strconv.Atoi(parts[1][1:]) + if err != nil { + return nil, errors.Wrapf(err, "Invalid source index %s", parts[1]) + } + if i < 0 { + return nil, errors.Errorf("Invalid source index @%d: must not be negative", i) + } + sourceIndex = i + } else { + ref, err := reference.ParseNormalizedNamed(parts[1]) + if err != nil { + return nil, errors.Wrapf(err, "docker-archive parsing reference") + } + ref = reference.TagNameOnly(ref) + refTagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { // If ref contains a digest, TagNameOnly does not change it + return nil, errors.Errorf("reference does not include a tag: %s", ref.String()) + } + nt = refTagged + } + } + + return newReference(path, nt, sourceIndex, nil, nil) +} + +// NewReference returns a Docker archive reference for a path and an optional reference. +func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) { + return newReference(path, ref, -1, nil, nil) +} + +// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index. +func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) { + return newReference(path, nil, sourceIndex, nil, nil) +} + +// newReference returns a docker archive reference for a path, an optional reference or sourceIndex, +// and optionally a tarfile.Reader and/or a tarfile.Writer matching path. +func newReference(path string, ref reference.NamedTagged, sourceIndex int, + archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) { + if strings.Contains(path, ":") { + return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path) + } + if ref != nil && sourceIndex != -1 { + return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index") + } + if _, isDigest := ref.(reference.Canonical); isDigest { + return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String()) + } + if sourceIndex != -1 && sourceIndex < 0 { + return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex) + } + return archiveReference{ + path: path, + ref: ref, + sourceIndex: sourceIndex, + archiveReader: archiveReader, + archiveWriter: archiveWriter, + }, nil +} + +func (ref archiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
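+// For example (illustrative): "/tmp/img.tar:docker.io/library/busybox:latest" for a tagged reference, "/tmp/img.tar:@0" for a source index, or just "/tmp/img.tar".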
+func (ref archiveReference) StringWithinTransport() string { + switch { + case ref.ref != nil: + return fmt.Sprintf("%s:%s", ref.path, ref.ref.String()) + case ref.sourceIndex != -1: + return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex) + default: + return ref.path + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref archiveReference) DockerReference() reference.Named { + return ref.ref +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref archiveReference) PolicyConfigurationIdentity() string { + // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. + return "" +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref archiveReference) PolicyConfigurationNamespaces() []string { + // TODO + return []string{} +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return ctrImage.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. 
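+// For docker-archive it always fails: removing an image from an existing tar archive is not supported.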
+func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Not really supported, for safety reasons. + return errors.New("Deleting images not implemented for docker-archive: images") +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go new file mode 100644 index 00000000000..6a4b8c645a1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go @@ -0,0 +1,82 @@ +package archive + +import ( + "io" + "os" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// Writer manages a single in-progress Docker archive and allows adding images to it. +type Writer struct { + path string // The original, user-specified path; not the maintained temporary file, if any + archive *tarfile.Writer + writer io.Closer +} + +// NewWriter returns a Writer for path. +// The caller should call .Close() on the returned object. +func NewWriter(sys *types.SystemContext, path string) (*Writer, error) { + fh, err := openArchiveForWriting(path) + if err != nil { + return nil, err + } + archive := tarfile.NewWriter(fh) + + return &Writer{ + path: path, + archive: archive, + writer: fh, + }, nil +} + +// Close writes all outstanding data about images to the archive, and +// releases state associated with the Writer, if any. +// No more images can be added after this is called. +func (w *Writer) Close() error { + err := w.archive.Close() + if err2 := w.writer.Close(); err2 != nil && err == nil { + err = err2 + } + return err +} + +// NewReference returns an ImageReference that allows adding an image to Writer, +// with an optional reference. +func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) { + return newReference(w.path, destinationRef, -1, nil, w.archive) +} + +// openArchiveForWriting opens path for writing a tar archive, +// making a few sanity checks. +func openArchiveForWriting(path string) (*os.File, error) { + // path can be either a pipe or a regular file + // in the case of a pipe, we require that we can open it for write + // in the case of a regular file, we don't want to overwrite any pre-existing file + // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, + // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) 
+ fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, errors.Wrapf(err, "opening file %q", path) + } + succeeded := false + defer func() { + if !succeeded { + fh.Close() + } + }() + fhStat, err := fh.Stat() + if err != nil { + return nil, errors.Wrapf(err, "statting file %q", path) + } + + if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { + return nil, errors.New("docker-archive doesn't support modifying existing images") + } + + succeeded = true + return fh, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/cache.go b/vendor/github.com/containers/image/v5/docker/cache.go new file mode 100644 index 00000000000..728d32d170c --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/cache.go @@ -0,0 +1,23 @@ +package docker + +import ( + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// bicTransportScope returns a BICTransportScope appropriate for ref. +func bicTransportScope(ref dockerReference) types.BICTransportScope { + // Blobs can be reused across the whole registry. + return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} +} + +// newBICLocationReference returns a BICLocationReference appropriate for ref. +func newBICLocationReference(ref dockerReference) types.BICLocationReference { + // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). + return types.BICLocationReference{Opaque: ref.ref.Name()} +} + +// parseBICLocationReference returns a repository for encoded lr. +func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { + return reference.ParseNormalizedNamed(lr.Opaque) +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go new file mode 100644 index 00000000000..323a02fc095 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -0,0 +1,85 @@ +package daemon + +import ( + "net/http" + "path/filepath" + + "github.com/containers/image/v5/types" + dockerclient "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + // The default API version to be used in case none is explicitly specified + defaultAPIVersion = "1.22" +) + +// newDockerClient initializes a new API client based on the passed SystemContext. +func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { + host := dockerclient.DefaultDockerHost + if sys != nil && sys.DockerDaemonHost != "" { + host = sys.DockerDaemonHost + } + + // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. + // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // + // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set + // TLSClientConfig to nil.
This can be achieved by using the form `http://` + url, err := dockerclient.ParseHostURL(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if url.Scheme != "unix" { + if url.Scheme == "http" { + httpClient = httpConfig() + } else { + hc, err := tlsConfig(sys) + if err != nil { + return nil, err + } + httpClient = hc + } + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(sys *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if sys != nil && sys.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} + +func httpConfig() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: nil, + }, + CheckRedirect: dockerclient.CheckRedirect, + } +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go new file mode 100644 index 00000000000..f68981472f3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type daemonImageDestination struct { + ref daemonReference + mustMatchRuntimeOS bool + *tarfile.Destination // Implements most of types.ImageDestination + archive *tarfile.Writer + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + // Other state + committed bool // writer has been closed +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { + if ref.ref == nil { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + var mustMatchRuntimeOS = true + if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "initializing docker engine client") + } + + reader, writer := io.Pipe() + archive := tarfile.NewWriter(writer) + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. 
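+ // Data flow (illustrative): Destination writes a tar stream into archive (a tarfile.Writer), which writes to writer (an *io.PipeWriter); imageLoadGoroutine reads the other end of the pipe and streams it to the daemon via c.ImageLoad.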
+ statusChannel := make(chan error, 1) + + goroutineContext, goroutineCancel := context.WithCancel(ctx) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(sys, archive, namedTaggedRef), + archive: archive, + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + if err := reader.CloseWithError(err); err != nil { + logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err) + } + } + }() + + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + err = errors.Wrap(err, "saving image to docker engine") + return + } + defer resp.Body.Close() +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *daemonImageDestination) MustMatchRuntimeOS() bool { + return d.mustMatchRuntimeOS +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *daemonImageDestination) Close() error { + if !d.committed { + logrus.Debugf("docker-daemon: Closing tar stream to abort loading") + // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. + // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including + // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the + // net/http version with native Context support in Go 1.7) do not always actually immediately cancel + // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and + // return early if the context is canceled without terminating the goroutine at all. + // So we need this CloseWithError to terminate sending the HTTP request Body + // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending + // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all. + // Whether that works or not, closing the PipeWriter seems desirable in any case. + if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil { + return err + } + } + d.goroutineCancel() + + return nil +} + +func (d *daemonImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + logrus.Debugf("docker-daemon: Closing tar stream") + if err := d.archive.Close(); err != nil { + return err + } + if err := d.writer.Close(); err != nil { + return err + } + d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine. + + logrus.Debugf("docker-daemon: Waiting for status") + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-d.statusChannel: + return err + } +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go new file mode 100644 index 00000000000..a6d8a6cf587 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "context" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +type daemonImageSource struct { + ref daemonReference + *tarfile.Source // Implements most of types.ImageSource +} + +// newImageSource returns a types.ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. +// +// It would be great if we were able to stream the input tar as it is being +// sent; but Docker sends the top-level manifest, which determines which paths +// to look for, at the end, so we will need to seek back and re-read, several times. +// (We could, perhaps, expect an exact sequence, assume that the first plaintext file +// is the config, and that the following len(RootFS) files are the layers, but that feels +// way too brittle.) +func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) { + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "initializing docker engine client") + } + // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference. + // Either way ImageSave should create a tarball with exactly one image. + inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()}) + if err != nil { + return nil, errors.Wrap(err, "loading image from docker engine") + } + defer inputStream.Close() + + archive, err := tarfile.NewReaderFromStream(sys, inputStream) + if err != nil { + return nil, err + } + src := tarfile.NewSource(archive, true, nil, -1) + return &daemonImageSource{ + ref: ref, + Source: src, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference { + return s.ref +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go new file mode 100644 index 00000000000..4e4ed688148 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -0,0 +1,223 @@ +package daemon + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for images managed by a local Docker daemon. +var Transport = daemonTransport{} + +type daemonTransport struct{} + +// Name returns the name of the transport, which must be unique among other transports. +func (t daemonTransport) Name() string { + return "docker-daemon" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { + // ID values cannot be effectively namespaced, and are clearly invalid host:port values. + if _, err := digest.Parse(scope); err == nil { + return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) + } + + // FIXME? We could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// daemonReference is an ImageReference for images managed by a local Docker daemon. +// Exactly one of id and ref can be set. +// For daemonImageSource, both id and ref are acceptable; ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) +// For daemonImageDestination, it must be a ref, which is NamedTagged. +// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases.
+ // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseAnyReference interprets such strings as digests. + if dgst, err := digest.Parse(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. + if dgst.Algorithm() != digest.Canonical { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if reference.FamiliarName(ref) == digest.Canonical.String() { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. + // This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + } + } + return daemonReference{ + id: id, + ref: ref, + }, nil +} + +func (ref daemonReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref daemonReference) StringWithinTransport() string { + switch { + case ref.id != "": + return ref.id.String() + case ref.ref != nil: + return reference.FamiliarString(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. 
+ panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref daemonReference) DockerReference() reference.Named { + return ref.ref // May be nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref daemonReference) PolicyConfigurationIdentity() string { + // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. + // But the existence of image IDs means that we can’t truly well namespace the input: + // a single image can be namespaced either using the name or the ID depending on how it is named. + // + // That’s fairly unexpected, but we have to cope somehow. + // + // So, use the ordinary docker/policyconfiguration namespacing for named images. + // image IDs all fall into the root namespace. + // Users can set up the root namespace to be either untrusted or rejected, + // and to set up specific trust for named namespaces. This allows verifying image + // identity when a name is known, and unnamed images would be untrusted or rejected. + switch { + case ref.id != "": + return "" // This still allows using the default "" scope to define a global policy for ID-identified images. + case ref.ref != nil: + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref daemonReference) PolicyConfigurationNamespaces() []string { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + switch { + case ref.id != "": + return []string{} + case ref.ref != nil: + return policyconfiguration.DockerReferenceNamespaces(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. 
+ panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
+ return errors.Errorf("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go new file mode 100644 index 00000000000..833323b424a --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -0,0 +1,820 @@ +package docker + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" + "github.com/containers/storage/pkg/homedir" + clientLib "github.com/docker/distribution/registry/client" + "github.com/docker/go-connections/tlsconfig" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + dockerHostname = "docker.io" + dockerV1Hostname = "index.docker.io" + dockerRegistry = "registry-1.docker.io" + + resolvedPingV2URL = "%s://%s/v2/" + resolvedPingV1URL = "%s://%s/v1/_ping" + tagsPath = "/v2/%s/tags/list" + manifestPath = "/v2/%s/manifests/%s" + blobsPath = "/v2/%s/blobs/%s" + blobUploadPath = "/v2/%s/blobs/uploads/" + extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" + + minimumTokenLifetimeSeconds = 60 + + extensionSignatureSchemaVersion = 2 // extensionSignature.Version + extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type + + backoffNumIterations = 5 + backoffInitialDelay = 2 * time.Second + backoffMaxDelay = 60 * time.Second +) + +type certPath struct { + path string + absolute bool +} + +var ( + homeCertDir = filepath.FromSlash(".config/containers/certs.d") + perHostCertDirs = []certPath{ + {path: "/etc/containers/certs.d", absolute: true}, + {path: "/etc/docker/certs.d", absolute: true}, + } + + defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" +) + +// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: +// signature represents a Docker image signature. +type extensionSignature struct { + Version int `json:"schemaVersion"` // Version specifies the schema version + Name string `json:"name"` // Name must be in "sha256:@signatureName" format + Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" + Content []byte `json:"content"` // Content contains the signature +} + +// signatureList represents list of Docker image signatures. +type extensionSignatureList struct { + Signatures []extensionSignature `json:"signatures"` +} + +type bearerToken struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + expirationTime time.Time +} + +// dockerClient is configuration for dealing with a single container registry. +type dockerClient struct { + // The following members are set by newDockerClient and do not change afterwards. 
+ sys *types.SystemContext + registry string + userAgent string + + // tlsClientConfig is set up by newDockerClient and will be used and updated + // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. + tlsClientConfig *tls.Config + // The following members are not set by newDockerClient and must be set by callers if needed. + auth types.DockerAuthConfig + registryToken string + signatureBase signatureStorageBase + scope authScope + + // The following members are detected registry properties: + // They are set after a successful detectProperties(), and never change afterwards. + client *http.Client + scheme string + challenges []challenge + supportsSignatures bool + + // Private state for setupRequestAuth (key: string, value: bearerToken) + tokenCache sync.Map + // Private state for detectProperties: + detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. + detectPropertiesError error // detectPropertiesError caches the initial error. +} + +type authScope struct { + remoteName string + actions string +} + +// sendAuth determines whether we need authentication for the v2 or v1 endpoint. +type sendAuth int + +const ( + // v2 endpoint with authentication. + v2Auth sendAuth = iota + // v1 endpoint with authentication. + // TODO: Get v1Auth working + // v1Auth + // no authentication, works for both v1 and v2. + noAuth +) + +func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { + token := new(bearerToken) + if err := json.Unmarshal(blob, &token); err != nil { + return nil, err + } + if token.Token == "" { + token.Token = token.AccessToken + } + if token.ExpiresIn < minimumTokenLifetimeSeconds { + token.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) + } + if token.IssuedAt.IsZero() { + token.IssuedAt = time.Now().UTC() + } + token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) + return token, nil +} + +// this is cloned from docker/go-connections because upstream docker has changed +// it and make deps here fails otherwise. +// We'll drop this once we upgrade to docker 1.13.x deps. +func serverDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } +} + +// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on sys and hostPort. +func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { + if sys != nil && sys.DockerCertPath != "" { + return sys.DockerCertPath, nil + } + if sys != nil && sys.DockerPerHostCertDirPath != "" { + return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil + } + + var ( + hostCertDir string + fullCertDirPath string + ) + + for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...)
{ + if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute { + hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path) + } else { + hostCertDir = perHostCertDir.path + } + + fullCertDirPath = filepath.Join(hostCertDir, hostPort) + _, err := os.Stat(fullCertDirPath) + if err == nil { + break + } + if os.IsNotExist(err) { + continue + } + if os.IsPermission(err) { + logrus.Debugf("error accessing certs directory due to permissions: %v", err) + continue + } + return "", err + } + return fullCertDirPath, nil +} + +// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) +// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) +// signatureBase is always set in the return value +func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { + auth, err := config.GetCredentialsForRef(sys, ref.ref) + if err != nil { + return nil, errors.Wrapf(err, "getting username and password") + } + + sigBase, err := SignatureStorageBaseURL(sys, ref, write) + if err != nil { + return nil, err + } + + registry := reference.Domain(ref.ref) + client, err := newDockerClient(sys, registry, ref.ref.Name()) + if err != nil { + return nil, err + } + client.auth = auth + if sys != nil { + client.registryToken = sys.DockerBearerRegistryToken + } + client.signatureBase = sigBase + client.scope.actions = actions + client.scope.remoteName = reference.Path(ref.ref) + return client, nil +} + +// newDockerClient returns a new dockerClient instance for the given registry +// and reference. The reference is used to query the registry configuration +// and can either be a registry (e.g., "registry.com[:5000]") or a repository +// (e.g., "registry.com[:5000][/some/namespace]/repo"). +// Please note that newDockerClient does not set all members of dockerClient +// (e.g., username and password); those must be set by callers if necessary. +func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) { + hostName := registry + if registry == dockerHostname { + registry = dockerRegistry + } + tlsClientConfig := serverDefault() + + // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, + // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible + // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because + // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is + // undocumented and may change if docker/docker changes. + certDir, err := dockerCertDir(sys, hostName) + if err != nil { + return nil, err + } + if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil { + return nil, err + } + + // Check if TLS verification shall be skipped (default=false) which can + // be specified in the sysregistriesv2 configuration.
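+ // An illustrative containers-registries.conf(5) entry that would set Insecure for one registry (the registry name is an assumption): + // + //	[[registry]] + //	prefix = "registry.example.com:5000" + //	insecure = true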
+ skipVerify := false + reg, err := sysregistriesv2.FindRegistry(sys, reference) + if err != nil { + return nil, errors.Wrapf(err, "loading registries") + } + if reg != nil { + if reg.Blocked { + return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys)) + } + skipVerify = reg.Insecure + } + tlsClientConfig.InsecureSkipVerify = skipVerify + + userAgent := defaultUserAgent + if sys != nil && sys.DockerRegistryUserAgent != "" { + userAgent = sys.DockerRegistryUserAgent + } + + return &dockerClient{ + sys: sys, + registry: registry, + userAgent: userAgent, + tlsClientConfig: tlsClientConfig, + }, nil +} + +// CheckAuth validates the credentials by attempting to log into the registry +// returns an error if an error occurred while making the http request or the status code received was 401 +func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { + client, err := newDockerClient(sys, registry, registry) + if err != nil { + return errors.Wrapf(err, "creating new docker client") + } + client.auth = types.DockerAuthConfig{ + Username: username, + Password: password, + } + + resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return httpResponseToError(resp, "") +} + +// SearchResult holds the information of each matching image +// It matches the output returned by the v1 endpoint +type SearchResult struct { + Name string `json:"name"` + Description string `json:"description"` + // StarCount states the number of stars the image has + StarCount int `json:"star_count"` + IsTrusted bool `json:"is_trusted"` + // IsAutomated states whether the image is an automated build + IsAutomated bool `json:"is_automated"` + // IsOfficial states whether the image is an official build + IsOfficial bool `json:"is_official"` +} + +// SearchRegistry queries a registry for images that contain "image" in their name +// The limit is the max number of results desired +// Note: The limit value doesn't work with all registries +// for example registry.access.redhat.com returns all the results without limiting it to the limit value +func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { + type V2Results struct { + // Repositories holds the results returned by the /v2/_catalog endpoint + Repositories []string `json:"repositories"` + } + type V1Results struct { + // Results holds the results returned by the /v1/search endpoint + Results []SearchResult `json:"results"` + } + v1Res := &V1Results{} + + // Get credentials from authfile for the underlying hostname + // We can't use GetCredentialsForRef here because we want to search the whole registry. + auth, err := config.GetCredentials(sys, registry) + if err != nil { + return nil, errors.Wrapf(err, "getting username and password") + } + + // The /v2/_catalog endpoint has been disabled for docker.io therefore + // the call made to that endpoint will fail. So using the v1 hostname + // for docker.io for simplicity of implementation and the fact that it + // returns search results. 
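+	// (dockerHostname and dockerV1Hostname are the "docker.io" and "index.docker.io"
+	// constants declared earlier in this file.)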
+ hostname := registry + if registry == dockerHostname { + hostname = dockerV1Hostname + } + + client, err := newDockerClient(sys, hostname, registry) + if err != nil { + return nil, errors.Wrapf(err, "creating new docker client") + } + client.auth = auth + if sys != nil { + client.registryToken = sys.DockerBearerRegistryToken + } + + // Only try the v1 search endpoint if the search query is not empty. If it is + // empty skip to the v2 endpoint. + if image != "" { + // set up the query values for the v1 endpoint + u := url.URL{ + Path: "/v1/search", + } + q := u.Query() + q.Set("q", image) + q.Set("n", strconv.Itoa(limit)) + u.RawQuery = q.Encode() + + logrus.Debugf("trying to talk to v1 search endpoint") + resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil) + if err != nil { + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) + } else { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, "")) + } else { + if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { + return nil, err + } + return v1Res.Results, nil + } + } + } + + logrus.Debugf("trying to talk to v2 search endpoint") + searchRes := []SearchResult{} + path := "/v2/_catalog" + for len(searchRes) < limit { + resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) + return nil, errors.Wrapf(err, "couldn't search registry %q", registry) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + err := httpResponseToError(resp, "") + logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err) + return nil, errors.Wrapf(err, "couldn't search registry %q", registry) + } + v2Res := &V2Results{} + if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { + return nil, err + } + + for _, repo := range v2Res.Repositories { + if len(searchRes) == limit { + break + } + if strings.Contains(repo, image) { + res := SearchResult{ + Name: repo, + } + // bugzilla.redhat.com/show_bug.cgi?id=1976283 + // If we have a full match, make sure it's listed as the first result. + // (Note there might be a full match we never see if we reach the result limit first.) + if repo == image { + searchRes = append([]SearchResult{res}, searchRes...) + } else { + searchRes = append(searchRes, res) + } + } + } + + link := resp.Header.Get("Link") + if link == "" { + break + } + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return searchRes, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) + path = linkURL.Path + if linkURL.RawQuery != "" { + path += "?" + path += linkURL.RawQuery + } + } + return searchRes, nil +} + +// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. 
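+// For example (an illustrative call, mirroring what CheckAuth above does):
+//	resp, err := c.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
+// performs an authenticated ping of the registry's base endpoint.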
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) { + if err := c.detectProperties(ctx); err != nil { + return nil, err + } + + url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) +} + +// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it, +// silently falling back to fallbackDelay if the header is missing or invalid. +func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration { + after := res.Header.Get("Retry-After") + if after == "" { + return fallbackDelay + } + logrus.Debugf("Detected 'Retry-After' header %q", after) + // First, check if we have a numerical value. + if num, err := strconv.ParseInt(after, 10, 64); err == nil { + return time.Duration(num) * time.Second + } + // Second, check if we have an HTTP date. + // If the delta between the date and now is positive, use it. + // Otherwise, fall back to using the default exponential back off. + if t, err := http.ParseTime(after); err == nil { + delta := time.Until(t) + if delta > 0 { + return delta + } + logrus.Debugf("Retry-After date in the past, ignoring it") + return fallbackDelay + } + // If the header contents are bogus, fall back to using the default exponential back off. + logrus.Debugf("Invalid Retry-After format, ignoring it") + return fallbackDelay +} + +// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// streamLen, if not -1, specifies the length of the data expected on stream. +// makeRequest should generally be preferred. +// In case of an HTTP 429 status code in the response, it may automatically retry a few times. +// TODO(runcom): too many arguments here, use a struct +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + delay := backoffInitialDelay + attempts := 0 + for { + res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope) + attempts++ + if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately + stream != nil || // We can't retry with a body (which is not restartable in the general case) + attempts == backoffNumIterations { + return res, err + } + // close response body before retry or context done + res.Body.Close() + + delay = parseRetryAfter(res, delay) + if delay > backoffMaxDelay { + delay = backoffMaxDelay + } + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds()) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // Nothing + } + delay = delay * 2 // exponential back off + } +} + +// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// streamLen, if not -1, specifies the length of the data expected on stream. +// makeRequest should generally be preferred. +// Note that no exponential back off is performed when receiving an http 429 status code. 
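+// (The Retry-After header, in both its delta-seconds and HTTP-date forms per
+// RFC 7231, and the capped exponential backoff are handled by
+// makeRequestToResolvedURL above; this function issues exactly one request.)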
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, url, stream) + if err != nil { + return nil, err + } + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. + req.ContentLength = streamLen + } + req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") + for n, h := range headers { + for _, hh := range h { + req.Header.Add(n, hh) + } + } + req.Header.Add("User-Agent", c.userAgent) + if auth == v2Auth { + if err := c.setupRequestAuth(req, extraScope); err != nil { + return nil, err + } + } + logrus.Debugf("%s %s", method, url) + res, err := c.client.Do(req) + if err != nil { + return nil, err + } + return res, nil +} + +// we're using the challenges from the /v2/ ping response and not the one from the destination +// URL in this request because: +// +// 1) docker does that as well +// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request +// +// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up +func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { + if len(c.challenges) == 0 { + return nil + } + schemeNames := make([]string, 0, len(c.challenges)) + for _, challenge := range c.challenges { + schemeNames = append(schemeNames, challenge.Scheme) + switch challenge.Scheme { + case "basic": + req.SetBasicAuth(c.auth.Username, c.auth.Password) + return nil + case "bearer": + registryToken := c.registryToken + if registryToken == "" { + cacheKey := "" + scopes := []authScope{c.scope} + if extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). 
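+					// For example, an extraScope of {remoteName: "library/busybox", actions: "pull"}
+					// (hypothetical values) yields the cache key "library/busybox:pull".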
+ cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) + scopes = append(scopes, *extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { + var ( + t *bearerToken + err error + ) + if c.auth.IdentityToken != "" { + t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes) + } else { + t, err = c.getBearerToken(req.Context(), challenge, scopes) + } + if err != nil { + return err + } + + token = *t + c.tokenCache.Store(cacheKey, token) + } + registryToken = token.Token + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken)) + return nil + default: + logrus.Debugf("no handler for %s authentication", challenge.Scheme) + } + } + logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) + return nil +} + +func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge, + scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil) + if err != nil { + return nil, err + } + + // Make the form data required against the oauth2 authentication + // More details here: https://docs.docker.com/registry/spec/auth/oauth/ + params := authReq.URL.Query() + if service, ok := challenge.Parameters["service"]; ok && service != "" { + params.Add("service", service) + } + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } + } + params.Add("grant_type", "refresh_token") + params.Add("refresh_token", c.auth.IdentityToken) + params.Add("client_id", "containers/image") + + authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) + authReq.Header.Add("User-Agent", c.userAgent) + authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res, "Trying to obtain access token"); err != nil { + return nil, err + } + + tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, + scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) + if err != nil { + return nil, err + } + + params := authReq.URL.Query() + if c.auth.Username != "" { + params.Add("account", c.auth.Username) + } + + if service, ok := challenge.Parameters["service"]; ok && service != "" { + params.Add("service", service) + } + + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } + } + + authReq.URL.RawQuery = params.Encode() + + if c.auth.Username != "" && c.auth.Password != "" { + authReq.SetBasicAuth(c.auth.Username, 
c.auth.Password) + } + authReq.Header.Add("User-Agent", c.userAgent) + + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res, "Requesting bearer token"); err != nil { + return nil, err + } + tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +// detectPropertiesHelper performs the work of detectProperties which executes +// it at most once. +func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { + // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly + // specified by the system context + if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { + c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue + } + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = c.tlsClientConfig + c.client = &http.Client{Transport: tr} + + ping := func(scheme string) error { + url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) + if err != nil { + logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + return err + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return httpResponseToError(resp, "") + } + c.challenges = parseAuthHeader(resp.Header) + c.scheme = scheme + c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" + return nil + } + err := ping("https") + if err != nil && c.tlsClientConfig.InsecureSkipVerify { + err = ping("http") + } + if err != nil { + err = errors.Wrapf(err, "pinging container registry %s", c.registry) + if c.sys != nil && c.sys.DockerDisableV1Ping { + return err + } + // best effort to understand if we're talking to a V1 registry + pingV1 := func(scheme string) bool { + url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) + if err != nil { + logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + return false + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return false + } + return true + } + isV1 := pingV1("https") + if !isV1 && c.tlsClientConfig.InsecureSkipVerify { + isV1 = pingV1("http") + } + if isV1 { + err = ErrV1NotSupported + } + } + return err +} + +// detectProperties detects various properties of the registry. +// See the dockerClient documentation for members which are affected by this. +func (c *dockerClient) detectProperties(ctx context.Context) error { + c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) + return c.detectPropertiesError +} + +// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, +// using the original data structures. 
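+// (It issues a GET against extensionsSignaturePath, i.e.
+// /extensions/v2/<repository>/signatures/<manifest digest>.)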
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { + path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) + res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) + } + + body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) + if err != nil { + return nil, err + } + + var parsedBody extensionSignatureList + if err := json.Unmarshal(body, &parsedBody); err != nil { + return nil, errors.Wrapf(err, "decoding signature list") + } + return &parsedBody, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go new file mode 100644 index 00000000000..c84bb37d2ab --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -0,0 +1,153 @@ +package docker + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods +// which are specific to Docker. +type Image struct { + types.ImageCloser + src *dockerImageSource +} + +// newImage returns a new Image interface type after setting up +// a client to the registry hosting the given image. +// The caller must call .Close() on the returned Image. +func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { + s, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, s) + if err != nil { + return nil, err + } + return &Image{ImageCloser: img, src: s}, nil +} + +// SourceRefFullName returns a fully expanded name for the repository this image is in. +func (i *Image) SourceRefFullName() string { + return i.src.logicalRef.ref.Name() +} + +// GetRepositoryTags list all tags available in the repository. The tag +// provided inside the ImageReference will be ignored. (This is a +// backward-compatible shim method which calls the module-level +// GetRepositoryTags) +func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) { + return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef) +} + +// GetRepositoryTags list all tags available in the repository. The tag +// provided inside the ImageReference will be ignored. 
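+// For example (illustrative only):
+//	ref, _ := ParseReference("//quay.io/libpod/alpine")
+//	tags, err := GetRepositoryTags(ctx, nil, ref)
+// pages through the registry's /v2/<name>/tags/list endpoint, following
+// Link headers (RFC 5988) until the tag list is exhausted.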
+func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { + dr, ok := ref.(dockerReference) + if !ok { + return nil, errors.Errorf("ref must be a dockerReference") + } + + path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) + client, err := newDockerClientFromRef(sys, dr, false, "pull") + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + tags := make([]string, 0) + + for { + res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res, "fetching tags list"); err != nil { + return nil, err + } + + var tagsHolder struct { + Tags []string + } + if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { + return nil, err + } + tags = append(tags, tagsHolder.Tags...) + + link := res.Header.Get("Link") + if link == "" { + break + } + + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) + path = linkURL.Path + if linkURL.RawQuery != "" { + path += "?" + path += linkURL.RawQuery + } + } + return tags, nil +} + +// GetDigest returns the image's digest +// Use this to optimize and avoid use of an ImageSource based on the returned digest; +// if you are going to use an ImageSource anyway, it’s more efficient to create it first +// and compute the digest from the value returned by GetManifest. +// NOTE: Implemented to avoid Docker Hub API limits, and mirror configuration may be +// ignored (but may be implemented in the future) +func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) { + dr, ok := ref.(dockerReference) + if !ok { + return "", errors.Errorf("ref must be a dockerReference") + } + + tagOrDigest, err := dr.tagOrDigest() + if err != nil { + return "", err + } + + client, err := newDockerClientFromRef(sys, dr, false, "pull") + if err != nil { + return "", errors.Wrap(err, "failed to create client") + } + + path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest) + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + + res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil) + if err != nil { + return "", err + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name()) + } + + dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest")) + if err != nil { + return "", err + } + + return dig, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go new file mode 100644 index 00000000000..e7af8f93d53 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -0,0 +1,706 @@ +package docker + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/putblobdigest" + 
"github.com/containers/image/v5/internal/streamdigest" + "github.com/containers/image/v5/internal/uploadreader" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerImageDestination struct { + ref dockerReference + c *dockerClient + // State + manifestDigest digest.Digest // or "" if not yet known. +} + +// newImageDestination creates a new ImageDestination for the specified image reference. +func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { + c, err := newDockerClientFromRef(sys, ref, true, "pull,push") + if err != nil { + return nil, err + } + return &dockerImageDestination{ + ref: ref, + c: c, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *dockerImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *dockerImageDestination) Close() error { + return nil +} + +func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { + mimeTypes := []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + imgspecv1.MediaTypeImageIndex, + manifest.DockerV2ListMediaType, + } + if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes { + mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType) + } + return mimeTypes +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.supportsSignatures: + return nil + case d.c.signatureBase != nil: + return nil + default: + return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + } +} + +func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *dockerImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. 
+func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (n int, err error) { + c.size += int64(len(p)) + return len(p), nil +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dockerImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. + // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, + // the source blob is uncompressed, and the destination blob is being compressed "on the fly". + if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests { + logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) + streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) + if err != nil { + return types.BlobInfo{}, err + } + defer cleanup() + stream = streamCopy + } + + if inputInfo.Digest != "" { + // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. + // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. + haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, cache) + if err != nil { + return types.BlobInfo{}, err + } + if haveBlob { + return reusedInfo, nil + } + } + + // FIXME? Chunked upload, progress reporting, etc. 
+ uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) + logrus.Debugf("Uploading %s", uploadPath) + res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusAccepted { + logrus.Debugf("Error initiating layer upload, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry) + } + uploadLocation, err := res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "determining upload URL") + } + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + sizeCounter := &sizeCounter{} + stream = io.TeeReader(stream, sizeCounter) + + uploadLocation, err = func() (*url.URL, error) { // A scope for defer + uploadReader := uploadreader.NewUploadReader(stream) + // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL + // returns, so there isn’t a way for the error text to be provided to any of our callers. + defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) + if err != nil { + logrus.Debugf("Error uploading layer chunked %v", err) + return nil, err + } + defer res.Body.Close() + if !successStatus(res.StatusCode) { + return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked") + } + uploadLocation, err := res.Location() + if err != nil { + return nil, errors.Wrap(err, "determining upload URL") + } + return uploadLocation, nil + }() + if err != nil { + return types.BlobInfo{}, err + } + blobDigest := digester.Digest() + + // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) + + locationQuery := uploadLocation.Query() + locationQuery.Set("digest", blobDigest.String()) + uploadLocation.RawQuery = locationQuery.Encode() + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading layer, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation) + } + + logrus.Debugf("Upload of layer %s complete", blobDigest) + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) + return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil +} + +// blobExists returns true iff repo contains a blob with digest, and if so, also its size. +// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); +// it returns a non-nil error only on an unexpected failure. 
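+// (A HEAD request against blobsPath; a 404 is reported as "not present" rather than as an error.)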
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
+	logrus.Debugf("Checking %s", checkPath)
+	res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return false, -1, err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusOK:
+		logrus.Debugf("... already exists")
+		return true, getBlobSize(res), nil
+	case http.StatusUnauthorized:
+		logrus.Debugf("... not authorized")
+		return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
+	case http.StatusNotFound:
+		logrus.Debugf("... not present")
+		return false, -1, nil
+	default:
+		return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+	}
+}
+
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+	u := url.URL{
+		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+		RawQuery: url.Values{
+			"mount": {srcDigest.String()},
+			"from":  {reference.Path(srcRepo)},
+		}.Encode(),
+	}
+	mountPath := u.String()
+	logrus.Debugf("Trying to mount %s", mountPath)
+	res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusCreated:
+		logrus.Debugf("... mount OK")
+		return nil
+	case http.StatusAccepted:
+		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+		// Abort, and let the ultimate caller do an upload when it's ready, instead.
+		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+		uploadLocation, err := res.Location()
+		if err != nil {
+			return errors.Wrap(err, "determining upload URL after a mount attempt")
+		}
+		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+		res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+		if err != nil {
+			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+		} else {
+			defer res2.Body.Close()
+			if res2.StatusCode != http.StatusNoContent {
+				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
+			}
+		}
+		// Anyway, if canceling the upload fails, ignore it and return the more important error:
+		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	default:
+		logrus.Debugf("Error mounting, response %#v", *res)
+		return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	}
+}
+
+// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
+// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
+// The caller must ensure info.Digest is set.
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (bool, types.BlobInfo, error) {
+	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	if exists {
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+		return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+	}
+	return false, types.BlobInfo{}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+
+	// First, check whether the blob happens to already exist at the destination.
+	haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, cache)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	if haveBlob {
+		return true, reusedInfo, nil
+	}
+
+	// Then try reusing blobs from other locations.
+	bic := blobinfocache.FromBlobInfoCache(cache)
+	candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
+	for _, candidate := range candidates {
+		candidateRepo, err := parseBICLocationReference(candidate.Location)
+		if err != nil {
+			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+			continue
+		}
+		if candidate.CompressorName != blobinfocache.Uncompressed {
+			logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
+		} else {
+			logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
+		}
+
+		// Sanity checks:
+		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+			logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+			continue
+		}
+		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+			logrus.Debug("... Already tried the primary destination")
+			continue
+		}
+
+		// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+ + // Checking candidateRepo, and mounting from it, requires an + // expanded token scope. + extraScope := &authScope{ + remoteName: reference.Path(candidateRepo), + actions: "pull", + } + // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead. + // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. + // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure. + // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly. + // Even worse, docker/distribution does not actually reasonably implement canceling uploads + // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask); + // so, be a nice client and don't create unnecessary upload sessions on the server. + exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope) + if err != nil { + logrus.Debugf("... Failed: %v", err) + continue + } + if !exists { + // FIXME? Should we drop the blob from cache here (and elsewhere?)? + continue // logrus.Debug() already happened in blobExists + } + if candidateRepo.Name() != d.ref.ref.Name() { + if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil { + logrus.Debugf("... Mount failed: %v", err) + continue + } + } + + bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + + compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) + if err != nil { + logrus.Debugf("... Failed: %v", err) + continue + } + + return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil + } + + return false, types.BlobInfo{}, nil +} + +// PutManifest writes manifest to the destination. +// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list +// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the +// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + refTail := "" + if instanceDigest != nil { + // If the instanceDigest is provided, then use it as the refTail, because the reference, + // whether it includes a tag or a digest, refers to the list as a whole, and not this + // particular instance. + refTail = instanceDigest.String() + // Double-check that the manifest we've been given matches the digest we've been given. 
+		matches, err := manifest.MatchesDigest(m, *instanceDigest)
+		if err != nil {
+			return errors.Wrapf(err, "digesting manifest in PutManifest")
+		}
+		if !matches {
+			manifestDigest, merr := manifest.Digest(m)
+			if merr != nil {
+				return errors.Wrapf(merr, "Attempted to PutManifest using an explicitly specified digest (%q), but computing the manifest's digest failed", instanceDigest.String())
+			}
+			return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+		}
+	} else {
+		// Compute the digest of the main manifest, or the list if it's a list, so that we
+		// have a digest value to use if we're asked to save a signature for the manifest.
+		digest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		d.manifestDigest = digest
+		// The refTail should be either a digest (which we expect to match the value we just
+		// computed) or a tag name.
+		refTail, err = d.ref.tagOrDigest()
+		if err != nil {
+			return err
+		}
+	}
+
+	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+
+	headers := map[string][]string{}
+	mimeType := manifest.GuessMIMEType(m)
+	if mimeType != "" {
+		headers["Content-Type"] = []string{mimeType}
+	}
+	res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	if !successStatus(res.StatusCode) {
+		rawErr := registryHTTPResponseToError(res)
+		err := errors.Wrapf(rawErr, "uploading manifest %s to %s", refTail, d.ref.ref.Name())
+		if isManifestInvalidError(rawErr) {
+			err = types.ManifestTypeRejectedError{Err: err}
+		}
+		return err
+	}
+	// An HTTP server may not be a registry at all, and just return 200 OK to everything
+	// (in particular that can fairly easily happen after tearing down a website and
+	// replacing it with a global 302 redirect to a new website, completely ignoring the
+	// path in the request); in that case we could “succeed” uploading a whole image.
+	// With docker/distribution we could rely on a Docker-Content-Digest header being present
+	// (because docker/distribution/registry/client has been failing uploads if it was missing),
+	// but that has been defined as explicitly optional by
+	// https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers
+	// So, just note the missing header in a debug log.
+	if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 {
+		logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry")
+	}
+	return nil
+}
+
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
+
+// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+	errors, ok := err.(errcode.Errors)
+	if !ok || len(errors) == 0 {
+		return false
+	}
+	err = errors[0]
+	ec, ok := err.(errcode.ErrorCoder)
+	if !ok {
+		return false
+	}
+
+	switch ec.ErrorCode() {
+	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+ case v2.ErrorCodeManifestInvalid: + return true + // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd) + // when uploading to a tag (because it can’t find a matching tag inside the manifest) + case v2.ErrorCodeTagInvalid: + return true + // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when + // uploading an OCI manifest that is (correctly, according to the spec) missing + // a top-level media type. See libpod issue #1719 + // FIXME: remove this case when ECR behavior is fixed + case errcode.ErrorCodeUnsupported: + return strings.Contains(err.Error(), "Invalid JSON syntax") + default: + return false + } +} + +// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + // Do not fail if we don’t really need to support signatures. + if len(signatures) == 0 { + return nil + } + if instanceDigest == nil { + if d.manifestDigest == "" { + // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures + return errors.Errorf("Unknown manifest digest, can't add signatures") + } + instanceDigest = &d.manifestDigest + } + + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.supportsSignatures: + return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest) + case d.c.signatureBase != nil: + return d.putSignaturesToLookaside(signatures, *instanceDigest) + default: + return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + } +} + +// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase, +// which is not nil, for a manifest with manifestDigest. +func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error { + // FIXME? This overwrites files one at a time, definitely not atomic. + // A failure when updating signatures with a reordered copy could lose some of them. + + // Skip dealing with the manifest digest if not necessary. + if len(signatures) == 0 { + return nil + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + for i, signature := range signatures { + url := signatureStorageURL(d.c.signatureBase, manifestDigest, i) + err := d.putOneSignature(url, signature) + if err != nil { + return err + } + } + // Remove any other signatures, if present. + // We stop at the first missing signature; if a previous deleting loop aborted + // prematurely, this may not clean up all of them, but one missing signature + // is enough for dockerImageSource to stop looking for other signatures, so that + // is sufficient. + for i := len(signatures); ; i++ { + url := signatureStorageURL(d.c.signatureBase, manifestDigest, i) + missing, err := d.c.deleteOneSignature(url) + if err != nil { + return err + } + if missing { + break + } + } + + return nil +} + +// putOneSignature stores one signature to url. +// NOTE: Keep this in sync with docs/signature-protocols.md! 
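+// For example, with a file:// lookaside base, signature index 0 for a manifest
+// digest sha256:<hex> is written to a path of roughly
+//	<base>/<repository>@sha256=<hex>/signature-1
+// (illustrative only; the exact layout is defined by signatureStorageURL
+// elsewhere in this package and in docs/signature-protocols.md).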
+func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { + switch url.Scheme { + case "file": + logrus.Debugf("Writing to %s", url.Path) + err := os.MkdirAll(filepath.Dir(url.Path), 0755) + if err != nil { + return err + } + err = ioutil.WriteFile(url.Path, signature, 0644) + if err != nil { + return err + } + return nil + + case "http", "https": + return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + default: + return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) + } +} + +// deleteOneSignature deletes a signature from url, if it exists. +// If it successfully determines that the signature does not exist, returns (true, nil) +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) { + switch url.Scheme { + case "file": + logrus.Debugf("Deleting %s", url.Path) + err := os.Remove(url.Path) + if err != nil && os.IsNotExist(err) { + return true, nil + } + return false, err + + case "http", "https": + return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + default: + return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) + } +} + +// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension, +// for a manifest with manifestDigest. +func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error { + // Skip dealing with the manifest digest, or reading the old state, if not necessary. + if len(signatures) == 0 { + return nil + } + + // Because image signatures are a shared resource in Atomic Registry, the default upload + // always adds signatures. Eventually we should also allow removing signatures, + // but the X-Registry-Supports-Signatures API extension does not support that yet. + + existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest) + if err != nil { + return err + } + existingSigNames := map[string]struct{}{} + for _, sig := range existingSignatures.Signatures { + existingSigNames[sig.Name] = struct{}{} + } + +sigExists: + for _, newSig := range signatures { + for _, existingSig := range existingSignatures.Signatures { + if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { + continue sigExists + } + } + + // The API expect us to invent a new unique name. This is racy, but hopefully good enough. 
+ var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return errors.Wrapf(err, "generating random signature len %d", n) + } + signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) + if _, ok := existingSigNames[signatureName]; !ok { + break + } + } + sig := extensionSignature{ + Version: extensionSignatureSchemaVersion, + Name: signatureName, + Type: extensionSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + if err != nil { + return err + } + + path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String()) + res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) + return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry) + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go new file mode 100644 index 00000000000..314e9b394ee --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -0,0 +1,765 @@ +package docker + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerImageSource struct { + logicalRef dockerReference // The reference the user requested. + physicalRef dockerReference // The actual reference we are accessing (possibly a mirror) + c *dockerClient + // State + cachedManifest []byte // nil if not loaded yet + cachedManifestMIMEType string // Only valid if cachedManifest != nil +} + +// newImageSource creates a new ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. 
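+// (It resolves any sysregistriesv2 mirror configuration for the reference and
+// tries each resulting pull source in turn, always trying the non-mirror
+// original location last.)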
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { + registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) + if err != nil { + return nil, errors.Wrapf(err, "loading registries configuration") + } + if registry == nil { + // No configuration was found for the provided reference, so use the + // equivalent of a default configuration. + registry = &sysregistriesv2.Registry{ + Endpoint: sysregistriesv2.Endpoint{ + Location: ref.ref.String(), + }, + Prefix: ref.ref.String(), + } + } + + // Check all endpoints for the manifest availability. If we find one that does + // contain the image, it will be used for all future pull actions. Always try the + // non-mirror original location last; this both transparently handles the case + // of no mirrors configured, and ensures we return the error encountered when + // accessing the upstream location if all endpoints fail. + pullSources, err := registry.PullSourcesFromReference(ref.ref) + if err != nil { + return nil, err + } + type attempt struct { + ref reference.Named + err error + } + attempts := []attempt{} + for _, pullSource := range pullSources { + if sys != nil && sys.DockerLogMirrorChoice { + logrus.Infof("Trying to access %q", pullSource.Reference) + } else { + logrus.Debugf("Trying to access %q", pullSource.Reference) + } + s, err := newImageSourceAttempt(ctx, sys, ref, pullSource) + if err == nil { + return s, nil + } + logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err) + attempts = append(attempts, attempt{ + ref: pullSource.Reference, + err: err, + }) + } + switch len(attempts) { + case 0: + return nil, errors.New("Internal error: newImageSource returned without trying any endpoint") + case 1: + return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise. + default: + // Don’t just build a string, try to preserve the typed error. + primary := &attempts[len(attempts)-1] + extras := []string{} + for i := 0; i < len(attempts)-1; i++ { + // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use. + // The paired [] at least have some chance of being unambiguous. + extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err)) + } + return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String()) + } +} + +// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource. +// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable. +// The caller must call .Close() on the returned ImageSource. +func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) { + physicalRef, err := newReference(pullSource.Reference) + if err != nil { + return nil, err + } + + endpointSys := sys + // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors. 
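+	// For example (a sketch; syntax per containers-registries.conf(5), host names made up),
+	// a configuration such as
+	//
+	//	[[registry]]
+	//	location = "docker.io"
+	//	[[registry.mirror]]
+	//	location = "mirror.example.com"
+	//
+	// can route a docker.io pull to mirror.example.com; the domain comparison below then
+	// detects the mismatch and strips the docker.io credentials before contacting the mirror.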
+	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
+		copy := *endpointSys
+		copy.DockerAuthConfig = nil
+		copy.DockerBearerRegistryToken = ""
+		endpointSys = &copy
+	}
+
+	client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
+	if err != nil {
+		return nil, err
+	}
+	client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+	s := &dockerImageSource{
+		logicalRef:  logicalRef,
+		physicalRef: physicalRef,
+		c:           client,
+	}
+
+	if err := s.ensureManifestIsLoaded(ctx); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+	return s.logicalRef
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+	return nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
+	return nil, nil
+}
+
+// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
+// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+	if contentType == "" {
+		return contentType
+	}
+	mimeType, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return ""
+	}
+	return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
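+// A caller-side sketch (illustrative, not part of this file): after obtaining an
+// ImageSource via ref.NewImageSource(ctx, sys), the primary manifest is fetched with
+//
+//	manblob, mimeType, err := src.GetManifest(ctx, nil)
+//
+// while passing a non-nil *digest.Digest selects one instance of a manifest list instead.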
+func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil {
+		return s.fetchManifest(ctx, instanceDigest.String())
+	}
+	err := s.ensureManifestIsLoaded(ctx)
+	if err != nil {
+		return nil, "", err
+	}
+	return s.cachedManifest, s.cachedManifestMIMEType, nil
+}
+
+func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
+	path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
+	headers := map[string][]string{
+		"Accept": manifest.DefaultRequestedManifestMIMETypes,
+	}
+	res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
+	defer res.Body.Close()
+	if res.StatusCode != http.StatusOK {
+		return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
+	}
+
+	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
+	if err != nil {
+		return nil, "", err
+	}
+	return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
+
+// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
+//
+// ImageSource implementations are not required or expected to do any caching,
+// but because our signatures are “attached” to the manifest digest,
+// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil)
+// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious
+// signature verification failures when pulling while a tag is being updated.
+func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
+	if s.cachedManifest != nil {
+		return nil
+	}
+
+	reference, err := s.physicalRef.tagOrDigest()
+	if err != nil {
+		return err
+	}
+
+	manblob, mt, err := s.fetchManifest(ctx, reference)
+	if err != nil {
+		return err
+	}
+	// We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
+	s.cachedManifest = manblob
+	s.cachedManifestMIMEType = mt
+	return nil
+}
+
+// getExternalBlob returns a reader for the first available blob URL from urls, which must not be empty.
+// It can return a nil reader when none of the URLs is supported; in that case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
+func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+	var (
+		resp *http.Response
+		err  error
+	)
+	if len(urls) == 0 {
+		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+	}
+	for _, u := range urls {
+		if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
+			continue // unsupported url. skip this url.
+		}
+		// NOTE: we must not authenticate on additional URLs as those
+		// can be abused to leak credentials or tokens. Please
+		// refer to CVE-2020-15157 for more information.
+		resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil)
+		if err == nil {
+			if resp.StatusCode != http.StatusOK {
+				err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+				logrus.Debug(err)
+				resp.Body.Close()
+				continue
+			}
+			break
+		}
+	}
+	if resp == nil && err == nil {
+		return nil, 0, nil // fallback to non-external blob
+	}
+	if err != nil {
+		return nil, 0, err
+	}
+	return resp.Body, getBlobSize(resp), nil
+}
+
+func getBlobSize(resp *http.Response) int64 {
+	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+	if err != nil {
+		size = -1
+	}
+	return size
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// splitHTTP200ResponseToPartial splits a 200 response into multiple streams as specified by the chunks
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) {
+	defer close(streams)
+	defer close(errs)
+	currentOffset := uint64(0)
+
+	body = makeBufferedNetworkReader(body, 64, 16384)
+	defer body.Close()
+	for _, c := range chunks {
+		if c.Offset != currentOffset {
+			if c.Offset < currentOffset {
+				errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset)
+				break
+			}
+			toSkip := c.Offset - currentOffset
+			if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+				errs <- err
+				break
+			}
+			currentOffset += toSkip
+		}
+		s := signalCloseReader{
+			closed:        make(chan interface{}),
+			stream:        ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+			consumeStream: true,
+		}
+		streams <- s
+
+		// Wait until the stream is closed before going to the next chunk
+		<-s.closed
+		currentOffset += c.Length
+	}
+}
+
+// handle206Response reads a 206 response and sends each part as a separate ReadCloser to the streams chan.
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) {
+	defer close(streams)
+	defer close(errs)
+	if !strings.HasPrefix(mediaType, "multipart/") {
+		streams <- body
+		return
+	}
+	boundary, found := params["boundary"]
+	if !found {
+		errs <- errors.Errorf("could not find boundary")
+		body.Close()
+		return
+	}
+	buffered := makeBufferedNetworkReader(body, 64, 16384)
+	defer buffered.Close()
+	mr := multipart.NewReader(buffered, boundary)
+	for {
+		p, err := mr.NextPart()
+		if err != nil {
+			if err != io.EOF {
+				errs <- err
+			}
+			return
+		}
+		s := signalCloseReader{
+			closed: make(chan interface{}),
+			stream: p,
+		}
+		streams <- s
+		// NextPart() cannot be called while the current part
+		// is being read, so wait until it is closed
+		<-s.closed
+	}
+}
+
+// GetBlobAt returns a stream for the specified blob.
+// The specified chunks must not overlap and must be sorted by offset.
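+// For example (values are illustrative), chunks {Offset: 0, Length: 100} and
+// {Offset: 200, Length: 100} yield the request header
+//
+//	Range: bytes=0-99,200-299
+//
+// the registry may answer with 206 (one multipart part per range, handled by
+// handle206Response) or with 200 (the whole blob, split locally by
+// splitHTTP200ResponseToPartial above).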
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + headers := make(map[string][]string) + + var rangeVals []string + for _, c := range chunks { + rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) + } + + headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))} + + if len(info.URLs) != 0 { + return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt") + } + + path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) + if err != nil { + return nil, nil, err + } + + switch res.StatusCode { + case http.StatusOK: + // if the server replied with a 200 status code, convert the full body response to a series of + // streams as it would have been done with 206. + streams := make(chan io.ReadCloser) + errs := make(chan error) + go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) + return streams, errs, nil + case http.StatusPartialContent: + mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } + + streams := make(chan io.ReadCloser) + errs := make(chan error) + + go handle206Response(streams, errs, res.Body, chunks, mediaType, params) + return streams, errs, nil + case http.StatusBadRequest: + res.Body.Close() + return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status} + default: + err := httpResponseToError(res, "Error fetching partial blob") + if err == nil { + err = errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } + res.Body.Close() + return nil, nil, err + } +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := s.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + return nil, 0, err + } + if err := httpResponseToError(res, "Error fetching blob"); err != nil { + res.Body.Close() + return nil, 0, err + } + cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef)) + return res.Body, getBlobSize(res), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
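+// For the lookaside case, signatures are probed one index at a time
+// (signatureStorageURL(base, digest, 0), then 1, 2, …) and collection stops at the
+// first missing entry; see getSignaturesFromLookaside below and
+// docs/signature-protocols.md for the exact URL naming.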
+func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if err := s.c.detectProperties(ctx); err != nil { + return nil, err + } + switch { + case s.c.supportsSignatures: + return s.getSignaturesFromAPIExtension(ctx, instanceDigest) + case s.c.signatureBase != nil: + return s.getSignaturesFromLookaside(ctx, instanceDigest) + default: + return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + } +} + +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. +func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } + if digested, ok := s.physicalRef.ref.(reference.Digested); ok { + d := digested.Digest() + if d.Algorithm() == digest.Canonical { + return d, nil + } + } + if err := s.ensureManifestIsLoaded(ctx); err != nil { + return "", err + } + return manifest.Digest(s.cachedManifest) +} + +// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, +// which is not nil. +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + signatures := [][]byte{} + for i := 0; ; i++ { + url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) + signature, missing, err := s.getOneSignature(ctx, url) + if err != nil { + return nil, err + } + if missing { + break + } + signatures = append(signatures, signature) + } + return signatures, nil +} + +// getOneSignature downloads one signature from url. +// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { + switch url.Scheme { + case "file": + logrus.Debugf("Reading %s", url.Path) + sig, err := ioutil.ReadFile(url.Path) + if err != nil { + if os.IsNotExist(err) { + return nil, true, nil + } + return nil, false, err + } + return sig, false, nil + + case "http", "https": + logrus.Debugf("GET %s", url) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) + if err != nil { + return nil, false, err + } + res, err := s.c.client.Do(req) + if err != nil { + return nil, false, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, true, nil + } else if res.StatusCode != http.StatusOK { + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + } + sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) + if err != nil { + return nil, false, err + } + return sig, false, nil + + default: + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + } +} + +// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. 
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) + if err != nil { + return nil, err + } + + var sigs [][]byte + for _, sig := range parsedBody.Signatures { + if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// deleteImage deletes the named image from the registry, if supported. +func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + // docker/distribution does not document what action should be used for deleting images. + // + // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. + // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. + // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). + // + // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". + c, err := newDockerClientFromRef(sys, ref, true, "*") + if err != nil { + return err + } + + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + refTail, err := ref.tagOrDigest() + if err != nil { + return err + } + getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) + get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil) + if err != nil { + return err + } + defer get.Body.Close() + manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) + if err != nil { + return err + } + switch get.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) + default: + return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) + } + + digest := get.Header.Get("Docker-Content-Digest") + deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) + + // When retrieving the digest from a registry >= 2.3 use the following header: + // "Accept": "application/vnd.docker.distribution.manifest.v2+json" + delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil) + if err != nil { + return err + } + defer delete.Body.Close() + + body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize) + if err != nil { + return err + } + if delete.StatusCode != http.StatusAccepted { + return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) + } + + manifestDigest, err := manifest.Digest(manifestBody) + if err != nil { + return err + } + + for i := 0; ; i++ { + url := signatureStorageURL(c.signatureBase, manifestDigest, i) + missing, err := c.deleteOneSignature(url) + if err != nil { + return err + } + if missing { + break + } + } + + return nil +} + +type bufferedNetworkReaderBuffer struct { + data []byte + len int + consumed int + err error +} + +type bufferedNetworkReader struct { + stream io.ReadCloser + emptyBuffer chan *bufferedNetworkReaderBuffer + readyBuffer chan *bufferedNetworkReaderBuffer + terminate chan bool + current *bufferedNetworkReaderBuffer + mutex sync.Mutex + gotEOF bool +} + +// handleBufferedNetworkReader runs in a goroutine +func handleBufferedNetworkReader(br *bufferedNetworkReader) { + defer close(br.readyBuffer) + for { + select { + case b := <-br.emptyBuffer: + b.len, b.err = br.stream.Read(b.data) + br.readyBuffer <- b + if b.err != nil { + return + } + case <-br.terminate: + return + } + } +} + +func (n *bufferedNetworkReader) Close() error { + close(n.terminate) + close(n.emptyBuffer) + return n.stream.Close() +} + +func (n *bufferedNetworkReader) read(p []byte) (int, error) { + if n.current != nil { + copied := copy(p, n.current.data[n.current.consumed:n.current.len]) + n.current.consumed += copied + if n.current.consumed == n.current.len { + n.emptyBuffer <- n.current + n.current = nil + } + if copied > 0 { + return copied, nil + } + } + if n.gotEOF { + return 0, io.EOF + } + + var b *bufferedNetworkReaderBuffer + + select { + case b = <-n.readyBuffer: + if b.err != nil { + if b.err != io.EOF { + return b.len, b.err + } + n.gotEOF = true + } + b.consumed = 0 + n.current = b + return n.read(p) + case <-n.terminate: + return 0, io.EOF + } +} + +func (n *bufferedNetworkReader) Read(p []byte) (int, error) { + n.mutex.Lock() + defer n.mutex.Unlock() + + return n.read(p) +} + +func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader { + br := bufferedNetworkReader{ + stream: stream, + emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), + readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), + terminate: make(chan bool), + } + + go func() { + handleBufferedNetworkReader(&br) + }() + + for i := uint(0); i < nBuffers; i++ { + b := bufferedNetworkReaderBuffer{ + data: make([]byte, bufferSize), + } + br.emptyBuffer <- &b + } + + return &br +} + +type signalCloseReader struct { + closed chan interface{} + stream io.ReadCloser + consumeStream bool +} + +func (s signalCloseReader) Read(p []byte) (int, error) { + return s.stream.Read(p) +} + +func (s signalCloseReader) Close() 
error {
+	defer close(s.closed)
+	if s.consumeStream {
+		if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+			s.stream.Close()
+			return err
+		}
+	}
+	return s.stream.Close()
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
new file mode 100644
index 00000000000..541e053f3c8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -0,0 +1,168 @@
+package docker
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/docker/policyconfiguration"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for container registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+	return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	if !strings.HasPrefix(refString, "//") {
+		return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+	}
+	ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+	if err != nil {
+		return nil, err
+	}
+	ref = reference.TagNameOnly(ref)
+	return NewReference(ref)
+}
+
+// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
+func NewReference(ref reference.Named) (types.ImageReference, error) {
+	return newReference(ref)
+}
+
+// newReference returns a dockerReference for a named reference.
+func newReference(ref reference.Named) (dockerReference, error) {
+	if reference.IsNameOnly(ref) {
+		return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+	}
+	// A github.com/distribution/reference value can have a tag and a digest at the same time!
+	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
+	// tag and digest), so fail. This MAY be accepted in the future.
+ // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported") + } + + return dockerReference{ + ref: ref, + }, nil +} + +func (ref dockerReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dockerReference) StringWithinTransport() string { + return "//" + reference.FamiliarString(ref.ref) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dockerReference) DockerReference() reference.Named { + return ref.ref +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dockerReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dockerReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.ref) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. 
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return newImage(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return deleteImage(ctx, sys, ref)
+}
+
+// tagOrDigest returns a tag or digest from the reference.
+func (ref dockerReference) tagOrDigest() (string, error) {
+	if ref, ok := ref.ref.(reference.Canonical); ok {
+		return ref.Digest().String(), nil
+	}
+	if ref, ok := ref.ref.(reference.NamedTagged); ok {
+		return ref.Tag(), nil
+	}
+	// This should not happen, NewReference above refuses reference.IsNameOnly values.
+	return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+}
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
new file mode 100644
index 00000000000..6f707db7dbd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -0,0 +1,61 @@
+package docker
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/registry/client"
+	perrors "github.com/pkg/errors"
+)
+
+var (
+	// ErrV1NotSupported is returned when we're trying to talk to a
+	// docker V1 registry.
+	ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
+	// ErrTooManyRequests is returned when the status code returned is 429
+	ErrTooManyRequests = errors.New("too many requests to registry")
+)
+
+// ErrUnauthorizedForCredentials is returned when the status code returned is 401
+type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ErrUnauthorizedForCredentials) Error() string {
+	return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
+}
+
+// httpResponseToError translates the http.Response into an error, possibly prefixing it with the supplied context. It returns
+// nil if the response is not considered an error.
+// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead.
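+// A caller-side sketch (illustrative only): the exported errors above can be matched
+// after a failed registry operation, e.g.
+//
+//	if errors.Is(err, ErrTooManyRequests) { /* back off and retry */ }
+//	var unauthorized ErrUnauthorizedForCredentials
+//	if errors.As(err, &unauthorized) { /* re-prompt for credentials */ }
+//
+// (errors.As works here because ErrUnauthorizedForCredentials is returned by value below.)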
+func httpResponseToError(res *http.Response, context string) error {
+	switch res.StatusCode {
+	case http.StatusOK:
+		return nil
+	case http.StatusTooManyRequests:
+		return ErrTooManyRequests
+	case http.StatusUnauthorized:
+		err := client.HandleErrorResponse(res)
+		return ErrUnauthorizedForCredentials{Err: err}
+	default:
+		if context != "" {
+			context = context + ": "
+		}
+		return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
+	}
+}
+
+// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
+// registry
+func registryHTTPResponseToError(res *http.Response) error {
+	err := client.HandleErrorResponse(res)
+	if e, ok := err.(*client.UnexpectedHTTPResponseError); ok {
+		response := string(e.Response)
+		if len(response) > 50 {
+			response = response[:50] + "..."
+		}
+		err = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
+	}
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
new file mode 100644
index 00000000000..7e1580990fd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -0,0 +1,200 @@
+package tarfile
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/internal/streamdigest"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+	archive  *Writer
+	repoTags []reference.NamedTagged
+	// Other state.
+	config []byte
+	sysCtx *types.SystemContext
+}
+
+// NewDestination returns a tarfile.Destination adding images to the specified Writer.
+func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
+	repoTags := []reference.NamedTagged{}
+	if ref != nil {
+		repoTags = append(repoTags, ref)
+	}
+	return &Destination{
+		archive:  archive,
+		repoTags: repoTags,
+		sysCtx:   sys,
+	}
+}
+
+// AddRepoTags adds the specified tags to the destination's repoTags.
+func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
+	d.repoTags = append(d.repoTags, tags...)
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
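+// (Returning false here means URL-only “foreign” layers are downloaded and embedded
+// into the tar, since a docker-load archive has to be self-contained.)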
+func (d *Destination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *Destination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *Destination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *Destination) HasThreadSafePutBlob() bool { + // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where + // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t + // much benefit from concurrency, mostly just extra CPU, memory and I/O contention. + return false +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // Ouch, we need to stream the blob into a temporary file just to determine the size. + // When the layer is decompressed, we also have to generate the digest on uncompressed data. + if inputInfo.Size == -1 || inputInfo.Digest == "" { + logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") + streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo) + if err != nil { + return types.BlobInfo{}, err + } + defer cleanup() + stream = streamCopy + logrus.Debugf("... 
streaming done") + } + + if err := d.archive.lock(); err != nil { + return types.BlobInfo{}, err + } + defer d.archive.unlock() + + // Maybe the blob has been already sent + ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo) + if err != nil { + return types.BlobInfo{}, err + } + if ok { + return reusedInfo, nil + } + + if isConfig { + buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream") + } + d.config = buf + if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { + return types.BlobInfo{}, errors.Wrap(err, "writing Config file") + } + } else { + if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { + return types.BlobInfo{}, err + } + } + d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}) + return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if err := d.archive.lock(); err != nil { + return false, types.BlobInfo{}, err + } + defer d.archive.unlock() + + return d.archive.tryReusingBlobLocked(info) +} + +// PutManifest writes manifest to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported for docker tar files`) + } + // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, + // so the caller trying a different manifest kind would be pointless. 
+	var man manifest.Schema2
+	if err := json.Unmarshal(m, &man); err != nil {
+		return errors.Wrap(err, "parsing manifest")
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	if err := d.archive.lock(); err != nil {
+		return err
+	}
+	defer d.archive.unlock()
+
+	if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
+		return err
+	}
+
+	return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
+}
+
+// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.Errorf(`Manifest lists are not supported for docker tar files`)
+	}
+	if len(signatures) != 0 {
+		return errors.Errorf("Storing signatures for docker tar files is not supported")
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
new file mode 100644
index 00000000000..6164ceb66ed
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -0,0 +1,269 @@
+package tarfile
+
+import (
+	"archive/tar"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
+type Reader struct {
+	// None of the fields below are modified after the archive is created, until .Close();
+	// this allows concurrent readers of the same archive.
+	path          string         // "" if the archive has already been closed.
+	removeOnClose bool           // Remove file on close if true
+	Manifest      []ManifestItem // Guaranteed to exist after the archive is created.
+}
+
+// NewReaderFromFile returns a Reader for the specified path.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "opening file %q", path)
+	}
+	defer file.Close()
+
+	// If the file is not compressed, we can just return the file itself
+	// as a source. Otherwise we pass the stream to NewReaderFromStream.
+	stream, isCompressed, err := compression.AutoDecompress(file)
+	if err != nil {
+		return nil, errors.Wrapf(err, "detecting compression for file %q", path)
+	}
+	defer stream.Close()
+	if !isCompressed {
+		return newReader(path, false)
+	}
+	return NewReaderFromStream(sys, stream)
+}
+
+// NewReaderFromStream returns a Reader for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewReaderFromStream returns.
+// The caller should call .Close() on the returned archive when done. +func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { + // Save inputStream to a temporary file + tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + if err != nil { + return nil, errors.Wrap(err, "creating temporary file") + } + defer tarCopyFile.Close() + + succeeded := false + defer func() { + if !succeeded { + os.Remove(tarCopyFile.Name()) + } + }() + + // In order to be compatible with docker-load, we need to support + // auto-decompression (it's also a nice quality-of-life thing to avoid + // giving users really confusing "invalid tar header" errors). + uncompressedStream, _, err := compression.AutoDecompress(inputStream) + if err != nil { + return nil, errors.Wrap(err, "auto-decompressing input") + } + defer uncompressedStream.Close() + + // Copy the plain archive to the temporary file. + // + // TODO: This can take quite some time, and should ideally be cancellable + // using a context.Context. + if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { + return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name()) + } + succeeded = true + + return newReader(tarCopyFile.Name(), true) +} + +// newReader creates a Reader for the specified path and removeOnClose flag. +// The caller should call .Close() on the returned archive when done. +func newReader(path string, removeOnClose bool) (*Reader, error) { + // This is a valid enough archive, except Manifest is not yet filled. + r := Reader{ + path: path, + removeOnClose: removeOnClose, + } + succeeded := false + defer func() { + if !succeeded { + r.Close() + } + }() + + // We initialize Manifest immediately when constructing the Reader instead + // of later on-demand because every caller will need the data, and because doing it now + // removes the need to synchronize the access/creation of the data if the archive is later + // used from multiple goroutines to access different images. + + // FIXME? Do we need to deal with the legacy format? + bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize) + if err != nil { + return nil, err + } + if err := json.Unmarshal(bytes, &r.Manifest); err != nil { + return nil, errors.Wrap(err, "decoding tar manifest.json") + } + + succeeded = true + return &r, nil +} + +// Close removes resources associated with an initialized Reader, if any. +func (r *Reader) Close() error { + path := r.path + r.path = "" // Mark the archive as closed + if r.removeOnClose { + return os.Remove(path) + } + return nil +} + +// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or +// both of which should be (nil, -1). +// On success, it returns the manifest item and an index of the matching tag, if a tag was used +// for matching; the index is -1 if a tag was not used. 
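+// For orientation (values are illustrative), a (docker save) manifest.json is a JSON
+// array with one entry per image, e.g.
+//
+//	[{"Config": "<config digest>.json",
+//	  "RepoTags": ["busybox:latest"],
+//	  "Layers": ["<layer>/layer.tar"]}]
+//
+// ref is matched against RepoTags, sourceIndex against the array position.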
+func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
+	switch {
+	case ref != nil && sourceIndex != -1:
+		return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+			ref.String(), sourceIndex)
+
+	case ref != nil:
+		refString := ref.String()
+		for i := range r.Manifest {
+			for tagIndex, tag := range r.Manifest[i].RepoTags {
+				parsedTag, err := reference.ParseNormalizedNamed(tag)
+				if err != nil {
+					return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+				}
+				if parsedTag.String() == refString {
+					return &r.Manifest[i], tagIndex, nil
+				}
+			}
+		}
+		return nil, -1, errors.Errorf("Tag %#v not found", refString)
+
+	case sourceIndex != -1:
+		if sourceIndex >= len(r.Manifest) {
+			return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
+				sourceIndex, len(r.Manifest))
+		}
+		return &r.Manifest[sourceIndex], -1, nil
+
+	default:
+		if len(r.Manifest) != 1 {
+			return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+		}
+		return &r.Manifest[0], -1, nil
+	}
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+	*tar.Reader
+	backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+	return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// It is safe to call this method from multiple goroutines simultaneously.
+// The caller should call .Close() on the returned stream.
+func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	// This is only a sanity check; if anyone did concurrently close r, this access is technically
+	// racy against the write in .Close().
+	if r.path == "" {
+		return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
+	}
+
+	f, err := os.Open(r.path)
+	if err != nil {
+		return nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			f.Close()
+		}
+	}()
+
+	tarReader, header, err := findTarComponent(f, componentPath)
+	if err != nil {
+		return nil, err
+	}
+	if header == nil {
+		return nil, os.ErrNotExist
+	}
+	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+		// We follow only one symlink, so no loops are possible.
+		if _, err := f.Seek(0, io.SeekStart); err != nil {
+			return nil, err
+		}
+		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+		// so we don't care.
+		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+		if err != nil {
+			return nil, err
+		}
+		if header == nil {
+			return nil, os.ErrNotExist
+		}
+	}
+
+	if !header.FileInfo().Mode().IsRegular() {
+		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+	}
+	succeeded = true
+	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a header and a reader matching componentPath within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
+	t := tar.NewReader(inputFile)
+	componentPath = path.Clean(componentPath)
+	for {
+		h, err := t.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if path.Clean(h.Name) == componentPath {
+			return t, h, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// readTarComponent returns full contents of componentPath.
+// It is safe to call this method from multiple goroutines simultaneously.
+func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
+	file, err := r.openTarComponent(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "loading tar component %s", path)
+	}
+	defer file.Close()
+	bytes, err := iolimits.ReadAtMost(file, limit)
+	if err != nil {
+		return nil, err
+	}
+	return bytes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
new file mode 100644
index 00000000000..b8d84d2452a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -0,0 +1,331 @@
+package tarfile
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"sync"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+type Source struct {
+	archive      *Reader
+	closeArchive bool // .Close() the archive when the source is closed.
+	// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
+	ref         reference.NamedTagged // May be nil
+	sourceIndex int                   // May be -1
+	// The following data is only available after ensureCachedDataIsPresent() succeeds
+	tarManifest       *ManifestItem // nil if not available yet.
+	configBytes       []byte
+	configDigest      digest.Digest
+	orderedDiffIDList []digest.Digest
+	knownLayers       map[digest.Digest]*layerInfo
+	// Other state
+	generatedManifest []byte    // Private cache for GetManifest(), nil if not set yet.
+	cacheDataLock     sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
+	cacheDataResult   error     // Private state for ensureCachedDataIsPresent
+}
+
+type layerInfo struct {
+	path string
+	size int64
+}
+
+// NewSource returns a tarfile.Source for an image in the specified archive matching ref
+// and sourceIndex (or the only image if they are (nil, -1)).
+// The archive will be closed if closeArchive is true.
+func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
+	return &Source{
+		archive:      archive,
+		closeArchive: closeArchive,
+		ref:          ref,
+		sourceIndex:  sourceIndex,
+	}
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is safe to call this from multi-threaded code.
+func (s *Source) ensureCachedDataIsPresent() error {
+	s.cacheDataLock.Do(func() {
+		s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
+	})
+	return s.cacheDataResult
+}
+
+// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
+// Call ensureCachedDataIsPresent instead.
+func (s *Source) ensureCachedDataIsPresentPrivate() error { + tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex) + if err != nil { + return err + } + + // Read and parse config. + configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize) + if err != nil { + return err + } + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. + if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { + return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config) + } + if parsedConfig.RootFS == nil { + return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config) + } + + knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) + if err != nil { + return err + } + + // Success; commit. + s.tarManifest = tarManifest + s.configBytes = configBytes + s.configDigest = digest.FromBytes(configBytes) + s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs + s.knownLayers = knownLayers + return nil +} + +// Close removes resources associated with an initialized Source, if any. +func (s *Source) Close() error { + if s.closeArchive { + return s.archive.Close() + } + return nil +} + +// TarManifest returns contents of manifest.json +func (s *Source) TarManifest() []ManifestItem { + return s.archive.Manifest +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { + // Collect layer data available in manifest and config. + if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { + return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + } + knownLayers := map[digest.Digest]*layerInfo{} + unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. + for i, diffID := range parsedConfig.RootFS.DiffIDs { + if _, ok := knownLayers[diffID]; ok { + // Apparently it really can happen that a single image contains the same layer diff more than once. + // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter + // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. + continue + } + layerPath := path.Clean(tarManifest.Layers[i]) + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.archive.path) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + layerPath := path.Clean(h.Name) + // FIXME: Cache this data across images in Reader. + if li, ok := unknownLayerSizes[layerPath]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. 
+ uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, layerPath) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary instances. +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`) + } + if s.generatedManifest == nil { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, "", err + } + m := manifest.Schema2{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + ConfigDescriptor: manifest.Schema2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + Size: int64(len(s.configBytes)), + Digest: s.configDigest, + }, + LayersDescriptors: []manifest.Schema2Descriptor{}, + } + for _, diffID := range s.orderedDiffIDList { + li, ok := s.knownLayers[diffID] + if !ok { + return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + } + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball + MediaType: manifest.DockerV2Schema2LayerMediaType, + Size: li.size, + }) + } + manifestBytes, err := json.Marshal(&m) + if err != nil { + return nil, "", err + } + s.generatedManifest = manifestBytes + } + return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil +} + +// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. +type uncompressedReadCloser struct { + io.Reader + underlyingCloser func() error + uncompressedCloser func() error +} + +func (r uncompressedReadCloser) Close() error { + var res error + if err := r.uncompressedCloser(); err != nil { + res = err + } + if err := r.underlyingCloser(); err != nil && res == nil { + res = err + } + return res +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *Source) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, 0, err + } + + if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. + return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + } + + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, + underlyingStream, err := s.archive.openTarComponent(li.path) + if err != nil { + return nil, 0, err + } + closeUnderlyingStream := true + defer func() { + if closeUnderlyingStream { + underlyingStream.Close() + } + }() + + // In order to handle the fact that digests != diffIDs (and thus that a + // caller which is trying to verify the blob will run into problems), + // we need to decompress blobs. This is a bit ugly, but it's a + // consequence of making everything addressable by their DiffID rather + // than by their digest... + // + // In particular, because the v2s2 manifest being generated uses + // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of + // layers not their _actual_ digest. The result is that copy/... will + // be verifying a "digest" which is not the actual layer's digest (but + // is instead the DiffID). + + uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) + if err != nil { + return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest) + } + + newStream := uncompressedReadCloser{ + Reader: uncompressedStream, + underlyingCloser: underlyingStream.Close, + uncompressedCloser: uncompressedStream.Close, + } + closeUnderlyingStream = false + + return newStream, li.size, nil + } + + return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } + return [][]byte{}, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary manifests. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
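uncompressedReadCloser above is the standard composition for a stream that owns two resources: close the decompression layer first, then the underlying input, and report the first error. A self-contained sketch of the same idea using gzip (the file name and helper are made up for illustration):

    package main

    import (
    	"compress/gzip"
    	"fmt"
    	"io"
    	"os"
    )

    // twoCloser reads from Reader and closes both closers, keeping the first error.
    type twoCloser struct {
    	io.Reader
    	inner, outer io.Closer // decompressor, then backing file
    }

    func (t twoCloser) Close() error {
    	err := t.inner.Close()
    	if err2 := t.outer.Close(); err == nil {
    		err = err2
    	}
    	return err
    }

    func openGzip(path string) (io.ReadCloser, error) {
    	f, err := os.Open(path)
    	if err != nil {
    		return nil, err
    	}
    	gz, err := gzip.NewReader(f)
    	if err != nil {
    		f.Close() // do not leak the file on a bad gzip header
    		return nil, err
    	}
    	return twoCloser{Reader: gz, inner: gz, outer: f}, nil
    }

    func main() {
    	rc, err := openGzip("layer.tar.gz") // illustrative path
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	defer rc.Close()
    	n, _ := io.Copy(io.Discard, rc)
    	fmt.Println("uncompressed bytes:", n)
    }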
+func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go new file mode 100644 index 00000000000..6e6ccd2d808 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go @@ -0,0 +1,28 @@ +package tarfile + +import ( + "github.com/containers/image/v5/manifest" + "github.com/opencontainers/go-digest" +) + +// Various data structures. + +// Based on github.com/docker/docker/image/tarexport/tarexport.go +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +// ManifestItem is an element of the array stored in the top-level manifest.json file. +type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API. + Config string + RepoTags []string + Layers []string + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` +} + +type imageID string diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go new file mode 100644 index 00000000000..255f0d354ed --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -0,0 +1,381 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Writer allows creating a (docker save)-formatted tar archive containing one or more images. +type Writer struct { + mutex sync.Mutex + // ALL of the following members can only be accessed with the mutex held. + // Use Writer.lock() to obtain the mutex. + writer io.Writer + tar *tar.Writer // nil if the Writer has already been closed. + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + repositories map[string]map[string]string + legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent. + manifest []ManifestItem + manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above. +} + +// NewWriter returns a Writer for the specified io.Writer. +// The caller must eventually call .Close() on the returned object to create a valid archive. +func NewWriter(dest io.Writer) *Writer { + return &Writer{ + writer: dest, + tar: tar.NewWriter(dest), + blobs: make(map[digest.Digest]types.BlobInfo), + repositories: map[string]map[string]string{}, + legacyLayers: map[string]struct{}{}, + manifestByConfig: map[digest.Digest]int{}, + } +} + +// lock does some sanity checks and locks the Writer. +// If this function succeeds, the caller must call w.unlock. +// Do not use Writer.mutex directly. 
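For orientation, the manifest.json modeled by ManifestItem above is a JSON array with one element per image. A minimal decoding sketch (the sample document and the local struct are made up; the real type is the ManifestItem defined above):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type manifestItem struct {
    	Config   string
    	RepoTags []string
    	Layers   []string
    }

    func main() {
    	// A made-up, minimal (docker save)-style manifest.json.
    	data := []byte(`[{"Config": "abc123.json",
    		"RepoTags": ["docker.io/library/busybox:latest"],
    		"Layers": ["deadbeef.tar"]}]`)

    	var items []manifestItem
    	if err := json.Unmarshal(data, &items); err != nil {
    		panic(err)
    	}
    	fmt.Println(items[0].Config, items[0].RepoTags, items[0].Layers)
    }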
+func (w *Writer) lock() error {
+	w.mutex.Lock()
+	if w.tar == nil {
+		w.mutex.Unlock()
+		return errors.New("Internal error: trying to use an already closed tarfile.Writer")
+	}
+	return nil
+}
+
+// unlock releases the lock obtained by Writer.lock.
+// Do not use Writer.mutex directly.
+func (w *Writer) unlock() {
+	w.mutex.Unlock()
+}
+
+// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, tryReusingBlobLocked returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// The caller must have locked the Writer.
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := w.blobs[info.Digest]; ok {
+		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+	}
+	return false, types.BlobInfo{}, nil
+}
+
+// recordBlobLocked records metadata of a blob, which must contain at least a digest and size.
+// The caller must have locked the Writer.
+func (w *Writer) recordBlobLocked(info types.BlobInfo) {
+	w.blobs[info.Digest] = info
+}
+
+// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer.
+// The caller must have locked the Writer.
+func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
+	if _, ok := w.legacyLayers[layerID]; !ok {
+		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
+		// See also the comment in physicalLayerPath.
+		physicalLayerPath := w.physicalLayerPath(layerDigest)
+		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+			return errors.Wrap(err, "creating layer symbolic link")
+		}
+
+		b := []byte("1.0")
+		if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+			return errors.Wrap(err, "writing VERSION file")
+		}
+
+		if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
+			return errors.Wrap(err, "writing config json file")
+		}
+
+		w.legacyLayers[layerID] = struct{}{}
+	}
+	return nil
+}
+
+// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
+func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error { + var chainID digest.Digest + lastLayerID := "" + for i, l := range layerDescriptors { + // The legacy format requires a config file per layer + layerConfig := make(map[string]interface{}) + + // The root layer doesn't have any parent + if lastLayerID != "" { + layerConfig["parent"] = lastLayerID + } + // The top layer configuration file is generated by using subpart of the image configuration + if i == len(layerDescriptors)-1 { + var config map[string]*json.RawMessage + err := json.Unmarshal(configBytes, &config) + if err != nil { + return errors.Wrap(err, "unmarshaling config") + } + for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} { + layerConfig[attr] = config[attr] + } + } + + // This chainID value matches the computation in docker/docker/layer.CreateChainID … + if chainID == "" { + chainID = l.Digest + } else { + chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) + } + // … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because + // we create the image configs differently in details. At least recent versions allocate new IDs on load, + // so this is fine as long as the IDs we use are unique / cannot loop. + // + // For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created + // config makes sure that everything uses the same “namespace”; a bit less efficient but clearer. + // + // Temporarily add the chainID to the config, only for the purpose of generating the image ID. + layerConfig["layer_id"] = chainID + b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point. + if err != nil { + return errors.Wrap(err, "marshaling layer config") + } + delete(layerConfig, "layer_id") + layerID := digest.Canonical.FromBytes(b).Hex() + layerConfig["id"] = layerID + + configBytes, err := json.Marshal(layerConfig) + if err != nil { + return errors.Wrap(err, "marshaling layer config") + } + + if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil { + return err + } + + lastLayerID = layerID + } + + if lastLayerID != "" { + for _, repoTag := range repoTags { + if val, ok := w.repositories[repoTag.Name()]; ok { + val[repoTag.Tag()] = lastLayerID + } else { + w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID} + } + } + } + return nil +} + +// checkManifestItemsMatch checks that a and b describe the same image, +// and returns an error if that’s not the case (which should never happen). +func checkManifestItemsMatch(a, b *ManifestItem) error { + if a.Config != b.Config { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config) + } + if len(a.Layers) != len(b.Layers) { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers) + } + for i := range a.Layers { + if a.Layers[i] != b.Layers[i] { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i]) + } + } + // Ignore RepoTags, that will be built later. + // Ignore Parent and LayerSources, which we don’t set to anything meaningful. 
+	return nil
+}
+
+// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags.
+// The caller must have locked the Writer.
+func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
+	layerPaths := []string{}
+	for _, l := range layerDescriptors {
+		layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+	}
+
+	var item *ManifestItem
+	newItem := ManifestItem{
+		Config:       w.configPath(configDigest),
+		RepoTags:     []string{},
+		Layers:       layerPaths,
+		Parent:       "", // We don’t have this information
+		LayerSources: nil,
+	}
+	if i, ok := w.manifestByConfig[configDigest]; ok {
+		item = &w.manifest[i]
+		if err := checkManifestItemsMatch(item, &newItem); err != nil {
+			return err
+		}
+	} else {
+		i := len(w.manifest)
+		w.manifestByConfig[configDigest] = i
+		w.manifest = append(w.manifest, newItem)
+		item = &w.manifest[i]
+	}
+
+	knownRepoTags := map[string]struct{}{}
+	for _, repoTag := range item.RepoTags {
+		knownRepoTags[repoTag] = struct{}{}
+	}
+	for _, tag := range repoTags {
+		// For github.com/docker/docker consumers, this works just as well as
+		//   refString := ref.String()
+		// because when reading the RepoTags strings, github.com/docker/docker/reference
+		// normalizes both of them to the same value.
+		//
+		// Doing it this way to include the normalized-out `docker.io[/library]` does make
+		// a difference for github.com/projectatomic/docker consumers, with the
+		// “Add --add-registry and --block-registry options to docker daemon” patch.
+		// These consumers treat reference strings which include a hostname and reference
+		// strings without a hostname differently.
+		//
+		// Using the host name here is more explicit about the intent, and it has the same
+		// effect as (docker pull) in projectatomic/docker, which tags the result using
+		// a hostname-qualified reference.
+		// See https://github.com/containers/image/issues/72 for a more detailed
+		// analysis and explanation.
+		refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
+
+		if _, ok := knownRepoTags[refString]; !ok {
+			item.RepoTags = append(item.RepoTags, refString)
+			knownRepoTags[refString] = struct{}{}
+		}
+	}
+
+	return nil
+}
+
+// Close writes all outstanding data about images to the archive, and finishes writing data
+// to the underlying io.Writer.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+	if err := w.lock(); err != nil {
+		return err
+	}
+	defer w.unlock()
+
+	b, err := json.Marshal(&w.manifest)
+	if err != nil {
+		return err
+	}
+	if err := w.sendBytesLocked(manifestFileName, b); err != nil {
+		return err
+	}
+
+	b, err = json.Marshal(w.repositories)
+	if err != nil {
+		return errors.Wrap(err, "marshaling repositories")
+	}
+	if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
+		return errors.Wrap(err, "writing repositories file")
+	}
+
+	if err := w.tar.Close(); err != nil {
+		return err
+	}
+	w.tar = nil // Mark the Writer as closed.
+	return nil
+}
+
+// configPath returns a path we choose for storing a config with the specified digest.
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) configPath(configDigest digest.Digest) string {
+	return configDigest.Hex() + ".json"
+}
+
+// physicalLayerPath returns a path we choose for storing a layer with the specified digest
+// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); because
+	// writeLegacyMetadataLocked constructs layer IDs differently from the input digest values (as described
+	// inside it), most of the layers would end up alone in subdirectories without any metadata; (docker load)
+	// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+	// in the root of the tarball.
+	return layerDigest.Hex() + ".tar"
+}
+
+type tarFI struct {
+	path      string
+	size      int64
+	isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	if t.isSymlink {
+		return os.ModeSymlink
+	}
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendSymlinkLocked sends a symlink into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendSymlinkLocked(path string, target string) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar link %s -> %s", path, target)
+	return w.tar.WriteHeader(hdr)
+}
+
+// sendBytesLocked sends the contents of b as a file at path into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendBytesLocked(path string, b []byte) error {
+	return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFileLocked sends a file into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := w.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	size, err := io.Copy(w.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go
new file mode 100644
index 00000000000..515e59327d3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/lookaside.go
@@ -0,0 +1,236 @@
+package docker
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/rootless"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/homedir"
+	"github.com/ghodss/yaml"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
+var systemRegistriesDirPath = builtinRegistriesDirPath
+
+// builtinRegistriesDirPath is the path to registries.d.
+// DO NOT change this, instead see systemRegistriesDirPath above.
+const builtinRegistriesDirPath = "/etc/containers/registries.d"
+
+// userRegistriesDir is the path to the per-user registries.d, relative to the user's home directory.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
+
+// defaultUserDockerDir is the default sigstore directory for an unprivileged user
+var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
+
+// defaultDockerDir is the default sigstore directory for root
+var defaultDockerDir = "/var/lib/containers/sigstore"
+
+// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type registryConfiguration struct {
+	DefaultDocker *registryNamespace `json:"default-docker"`
+	// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*).
+	Docker map[string]registryNamespace `json:"docker"`
+}
+
+// registryNamespace defines lookaside locations for a single namespace.
+type registryNamespace struct {
+	SigStore        string `json:"sigstore"`         // For reading, and if SigStoreStaging is not present, for writing.
+	SigStoreStaging string `json:"sigstore-staging"` // For writing only.
+}
+
+// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below.
+type signatureStorageBase *url.URL
+
+// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
+// The usage of the base URL is defined in docs/signature-protocols.md (lookaside storage, separate from the docker/distribution registry API).
+// Warning: This function only exposes configuration in registries.d;
+// just because this function returns a URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
+func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
+	dr, ok := ref.(dockerReference)
+	if !ok {
+		return nil, errors.Errorf("ref must be a dockerReference")
+	}
+	// FIXME? Loading and parsing the config could be cached across calls.
+	dirPath := registriesDirPath(sys)
+	logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
+	config, err := loadAndMergeConfig(dirPath)
+	if err != nil {
+		return nil, err
+	}
+
+	topLevel := config.signatureTopLevel(dr, write)
+	var url *url.URL
+	if topLevel != "" {
+		url, err = url.Parse(topLevel)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
+		}
+	} else {
+		// Return the default directory if no sigstore is specified in the configuration file.
+		url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
+		logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String())
+	}
+	// NOTE: Keep this in sync with docs/signature-protocols.md!
+	// FIXME? Restrict to explicitly supported schemes?
+	repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references + return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String()) + } + url.Path = url.Path + "/" + repo + return url, nil +} + +// registriesDirPath returns a path to registries.d +func registriesDirPath(sys *types.SystemContext) string { + return registriesDirPathWithHomeDir(sys, homedir.Get()) +} + +// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath, +// it exists only to allow testing it with an artificial home directory. +func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string { + if sys != nil && sys.RegistriesDirPath != "" { + return sys.RegistriesDirPath + } + userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) + if _, err := os.Stat(userRegistriesDirPath); err == nil { + return userRegistriesDirPath + } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + } + + return systemRegistriesDirPath +} + +// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid +func builtinDefaultSignatureStorageDir(euid int) *url.URL { + if euid != 0 { + return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)} + } + return &url.URL{Scheme: "file", Path: defaultDockerDir} +} + +// loadAndMergeConfig loads configuration files in dirPath +func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { + mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} + dockerDefaultMergedFrom := "" + nsMergedFrom := map[string]string{} + + dir, err := os.Open(dirPath) + if err != nil { + if os.IsNotExist(err) { + return &mergedConfig, nil + } + return nil, err + } + configNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for _, configName := range configNames { + if !strings.HasSuffix(configName, ".yaml") { + continue + } + configPath := filepath.Join(dirPath, configName) + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + + var config registryConfiguration + err = yaml.Unmarshal(configBytes, &config) + if err != nil { + return nil, errors.Wrapf(err, "parsing %s", configPath) + } + + if config.DefaultDocker != nil { + if mergedConfig.DefaultDocker != nil { + return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + dockerDefaultMergedFrom, configPath) + } + mergedConfig.DefaultDocker = config.DefaultDocker + dockerDefaultMergedFrom = configPath + } + + for nsName, nsConfig := range config.Docker { // includes config.Docker == nil + if _, ok := mergedConfig.Docker[nsName]; ok { + return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + nsName, nsMergedFrom[nsName], configPath) + } + mergedConfig.Docker[nsName] = nsConfig + nsMergedFrom[nsName] = configPath + } + } + + return &mergedConfig, nil +} + +// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. +// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured. 
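Taken together: SignatureStorageBaseURL above picks a base from registries.d and appends the repository path, and the signatureStorageURL helper further down derives one URL per signature index. A sketch of the resulting layout (the base URL and digest are made up):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// Base already extended with the repo path, as SignatureStorageBaseURL does.
    	base, err := url.Parse("https://sigstore.example.com/library/busybox")
    	if err != nil {
    		panic(err)
    	}

    	hex := "7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"

    	// Mirrors signatureStorageURL: <base>@<algorithm>=<hex>/signature-<index+1>
    	u := *base
    	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d", u.Path, "sha256", hex, 1)
    	fmt.Println(u.String())
    	// https://sigstore.example.com/library/busybox@sha256=7cc4...aa/signature-1
    }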
+func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { + if config.Docker != nil { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if ns, ok := config.Docker[identity]; ok { + logrus.Debugf(` Using "docker" namespace %s`, identity) + if url := ns.signatureTopLevel(write); url != "" { + return url + } + } + + // Look for a match of the possible parent namespaces. + for _, name := range ref.PolicyConfigurationNamespaces() { + if ns, ok := config.Docker[name]; ok { + logrus.Debugf(` Using "docker" namespace %s`, name) + if url := ns.signatureTopLevel(write); url != "" { + return url + } + } + } + } + // Look for a default location + if config.DefaultDocker != nil { + logrus.Debugf(` Using "default-docker" configuration`) + if url := config.DefaultDocker.signatureTopLevel(write); url != "" { + return url + } + } + return "" +} + +// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. +// or "" if nothing has been configured. +func (ns registryNamespace) signatureTopLevel(write bool) string { + if write && ns.SigStoreStaging != "" { + logrus.Debugf(` Using %s`, ns.SigStoreStaging) + return ns.SigStoreStaging + } + if ns.SigStore != "" { + logrus.Debugf(` Using %s`, ns.SigStore) + return ns.SigStore + } + return "" +} + +// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. +// base is not nil from the caller +// NOTE: Keep this in sync with docs/signature-protocols.md! +func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { + url := *base + url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) + return &url +} diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go new file mode 100644 index 00000000000..94e9e5f234a --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -0,0 +1,77 @@ +package policyconfiguration + +import ( + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/pkg/errors" +) + +// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, +// as a backend for ImageReference.PolicyConfigurationIdentity. +// The reference must satisfy !reference.IsNameOnly(). +func DockerReferenceIdentity(ref reference.Named) (string, error) { + res := ref.Name() + tagged, isTagged := ref.(reference.NamedTagged) + digested, isDigested := ref.(reference.Canonical) + switch { + case isTagged && isDigested: // Note that this CAN actually happen. + return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) + case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() + return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) + case isTagged: + res = res + ":" + tagged.Tag() + case isDigested: + res = res + "@" + digested.Digest().String() + default: // Coverage: The above was supposed to be exhaustive. 
+ return "", errors.New("Internal inconsistency, unexpected default branch") + } + return res, nil +} + +// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, +// as a backend for ImageReference.PolicyConfigurationIdentity. +// The reference must satisfy !reference.IsNameOnly(). +func DockerReferenceNamespaces(ref reference.Named) []string { + // Look for a match of the repository, and then of the possible parent + // namespaces. Note that this only happens on the expanded host names + // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", + // then in its parent "docker.io/library"; in none of "busybox", + // un-namespaced "library" nor in "" supposedly implicitly representing "library/". + // + // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last + // iteration matches the host name (for any namespace). + res := []string{} + name := ref.Name() + for { + res = append(res, name) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + + // Strip port number if any, before appending to res slice. + // Currently, the most compatible behavior is to return + // example.com:8443/ns, example.com:8443, *.com. + // If a port number is not specified, the expected behavior would be + // example.com/ns, example.com, *.com + portNumColon := strings.Index(name, ":") + if portNumColon != -1 { + name = name[:portNumColon] + } + + // Append wildcarded domains to res slice + for { + firstDot := strings.Index(name, ".") + if firstDot == -1 { + break + } + name = name[firstDot+1:] + + res = append(res, "*."+name) + } + return res +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md new file mode 100644 index 00000000000..3c4d74eb4d3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go new file mode 100644 index 00000000000..978df7eabbf --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. 
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+	matched, err := path.Match(pattern, FamiliarString(ref))
+	if namedRef, isNamed := ref.(Named); isNamed && !matched {
+		matched, _ = path.Match(pattern, FamiliarName(namedRef))
+	}
+	return matched, err
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
new file mode 100644
index 00000000000..6a86ec64fdc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
@@ -0,0 +1,181 @@
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	legacyDefaultDomain = "index.docker.io"
+	defaultDomain       = "docker.io"
+	officialRepoName    = "library"
+	defaultTag          = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. For a reference that contains both a tag
+// and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested; only
+			// return the digested form.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
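The two entry points above behave as follows; a brief sketch (the expected outputs are derived from the normalization rules, reusing the digest from the ParseDockerRef example):

    package main

    import (
    	"fmt"

    	"github.com/containers/image/v5/docker/reference"
    )

    func main() {
    	// Familiar name -> fully qualified reference.
    	n, _ := reference.ParseNormalizedNamed("ubuntu")
    	fmt.Println(n.String()) // docker.io/library/ubuntu

    	// With both a tag and a digest present, only the digest is kept.
    	d, _ := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
    	fmt.Println(d.String()) // docker.io/library/busybox@sha256:7cc4...aa
    }

+
+// splitDockerDomain splits a repository name into domain and remote-name strings.
+// If no valid domain is found, the default domain is used. The repository name
+// must already be validated.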
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized name-only reference.
+func familiarizeName(named namedRepository) repository {
+	repo := repository{
+		domain: named.Domain(),
+		path:   named.Path(),
+	}
+
+	if repo.domain == defaultDomain {
+		repo.domain = ""
+		// Handle official repositories which have the pattern "library/<official repo name>"
+		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+			repo.path = split[1]
+		}
+	}
+	return repo
+}
+
+func (r reference) Familiar() Named {
+	return reference{
+		namedRepository: familiarizeName(r.namedRepository),
+		tag:             r.tag,
+		digest:          r.digest,
+	}
+}
+
+func (r repository) Familiar() Named {
+	return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+	return taggedReference{
+		namedRepository: familiarizeName(t.namedRepository),
+		tag:             t.tag,
+	}
+}
+
+func (c canonicalReference) Familiar() Named {
+	return canonicalReference{
+		namedRepository: familiarizeName(c.namedRepository),
+		digest:          c.digest,
+	}
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+	if IsNameOnly(ref) {
+		namedTagged, err := WithTag(ref, defaultTag)
+		if err != nil {
+			// Default tag must be valid, to create a NamedTagged
+			// type with non-validated input the WithTag function
+			// should be used instead
+			panic(err)
+		}
+		return namedTagged
+	}
+	return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+		return digestReference("sha256:" + ref), nil
+	}
+	if dgst, err := digest.Parse(ref); err == nil {
+		return digestReference(dgst), nil
+	}
+
+	return ParseNormalizedNamed(ref)
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go
new file mode 100644
index 00000000000..8c0c23b2fe1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference                       := name [ ":" tag ] [ "@" digest ]
+// name                            := [domain '/'] path-component ['/' path-component]*
+// domain                          := domain-component ['.'
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. 
+func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. 
+func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
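The constructors above compose as expected; a brief sketch (the digest value is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/containers/image/v5/docker/reference"
    	"github.com/opencontainers/go-digest"
    )

    func main() {
    	name, _ := reference.WithName("docker.io/library/busybox")

    	tagged, _ := reference.WithTag(name, "1.34")
    	fmt.Println(tagged.String()) // docker.io/library/busybox:1.34

    	dgst := digest.Digest("sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
    	canonical, _ := reference.WithDigest(name, dgst)
    	fmt.Println(canonical.String()) // docker.io/library/busybox@sha256:7cc4...aa
    }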
+func TrimNamed(ref Named) Named {
+	domain, path := SplitHostname(ref)
+	return repository{
+		domain: domain,
+		path:   path,
+	}
+}
+
+func getBestReferenceType(ref reference) Reference {
+	if ref.Name() == "" {
+		// Allow digest only references
+		if ref.digest != "" {
+			return digestReference(ref.digest)
+		}
+		return nil
+	}
+	if ref.tag == "" {
+		if ref.digest != "" {
+			return canonicalReference{
+				namedRepository: ref.namedRepository,
+				digest:          ref.digest,
+			}
+		}
+		return ref.namedRepository
+	}
+	if ref.digest == "" {
+		return taggedReference{
+			namedRepository: ref.namedRepository,
+			tag:             ref.tag,
+		}
+	}
+
+	return ref
+}
+
+type reference struct {
+	namedRepository
+	tag    string
+	digest digest.Digest
+}
+
+func (r reference) String() string {
+	return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+	return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+	return r.digest
+}
+
+type repository struct {
+	domain string
+	path   string
+}
+
+func (r repository) String() string {
+	return r.Name()
+}
+
+func (r repository) Name() string {
+	if r.domain == "" {
+		return r.path
+	}
+	return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+	return r.domain
+}
+
+func (r repository) Path() string {
+	return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+	return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+	return digest.Digest(d)
+}
+
+type taggedReference struct {
+	namedRepository
+	tag string
+}
+
+func (t taggedReference) String() string {
+	return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+	return t.tag
+}
+
+type canonicalReference struct {
+	namedRepository
+	digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+	return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+	return c.digest
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
new file mode 100644
index 00000000000..78603493203
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alpha-numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and multiple
+	// dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// domainComponentRegexp restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp
+	// and followed by an optional port.
+	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// DomainRegexp defines the structure of potential domain components
+	// that may be part of image names.
This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. 
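+// For example (illustrative), optional(match(`[0-9]+`)) produces the
+// expression `(?:[0-9]+)?`.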
+func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go new file mode 100644 index 00000000000..d0bbbba8a54 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go @@ -0,0 +1,159 @@ +package docker + +// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. + +import ( + "net/http" + "strings" +) + +// challenge carries information from a WWW-Authenticate response header. +// See RFC 7235. +type challenge struct { + // Scheme is the auth-scheme according to RFC 7235 + Scheme string + + // Parameters are the auth-params according to RFC 7235 + Parameters map[string]string +} + +// Octet types from RFC 7230. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +func parseAuthHeader(header http.Header) []challenge { + challenges := []challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +// NOTE: This is not a fully compliant parser per RFC 7235: +// Most notably it does not support more than one challenge within a single header +// Some of the whitespace parsing also seems noncompliant. 
+// But it is clearly better than what we used to have… +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go new file mode 100644 index 00000000000..4fe84413c12 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_list.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + list, err := manifest.Schema2ListFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "parsing schema2 manifest list") + } + targetManifestDigest, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "loading manifest for target platform") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go new file mode 100644 index 00000000000..5f24970c371 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go @@ -0,0 +1,248 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + 
"github.com/pkg/errors" +) + +type manifestSchema1 struct { + m *manifest.Schema1 +} + +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { + return nil, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) + if err != nil { + return nil, err + } + return v2s2.OCIConfig(ctx) +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + // This is a bit convoluted: We can’t just have a "get embedded docker reference" method + // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually + // embed a full docker/distribution reference, but only the repo name and tag (without the host name). + // So we would have to provide a “return repo without host name, and tag” getter for the generic code, + // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the + // generic copy code needs to know about is reference.Named and that a manifest may need updating + // for some destinations. 
+	name := reference.Path(ref)
+	var tag string
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		tag = tagged.Tag()
+	} else {
+		tag = ""
+	}
+	return m.m.Name != name || m.m.Tag != tag
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) {
+	return m.m.Inspect(nil)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+
+	// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
+	// handle conversions between them by doing nothing.
+	if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType {
+		converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+			imgspecv1.MediaTypeImageManifest:  copy.convertToManifestOCI1,
+			manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		if converted != nil {
+			return converted, nil
+		}
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	if options.EmbeddedDockerReference != nil {
+		copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+			copy.m.Tag = tagged.Tag()
+		} else {
+			copy.m.Tag = ""
+		}
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
+//
+// We need this function just because a function returning an implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface
+func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	return m.convertToManifestSchema2(ctx, options)
+}
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
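+// Roughly speaking, schema1 lists fsLayers and history entries newest-first,
+// while schema2 lists layers oldest-first; hence the v1Index/v2Index reversal
+// in the conversion loop below.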
+// +// Based on github.com/docker/docker/distribution/pull_v2.go +func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + uploadedLayerInfos := options.InformationOnly.LayerInfos + layerDiffIDs := options.InformationOnly.LayerDiffIDs + + if len(m.m.ExtractedV1Compatibility) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. + return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + } + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + } + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + } + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.FSLayers)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + // Build a list of the diffIDs for the non-empty layers. + diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) + } + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestOCI1(ctx, options) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema1) SupportsEncryption(context.Context) bool { + return false +} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go new file mode 100644 index 00000000000..b250a6b1d2e --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -0,0 +1,400 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. 
Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	configBlob, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+	// than OCI v1. This unmarshal makes sure we drop docker v2s2
+	// fields that aren't needed in OCI v1.
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(configBlob, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
+	if m.configBlob == nil {
+		if m.src == nil {
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+		}
+		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
+		if err != nil {
+			return nil, err
+		}
+		defer stream.Close()
+		blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+		if err != nil {
+			return nil, err
+		}
+		computedDigest := digest.FromBytes(blob)
+		if computedDigest != m.m.ConfigDescriptor.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+		}
+		m.configBlob = blob
+	}
+	return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.Schema2Clone(m.m),
+	}
+
+	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
+		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+		imgspecv1.MediaTypeImageManifest:        copy.convertToManifestOCI1,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if converted != nil {
+		return converted, nil
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+	return imgspecv1.Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
+	configOCI, err := m.OCIConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCIBytes, err := json.Marshal(configOCI)
+	if err != nil {
+		return nil, err
+	}
+
+	config := imgspecv1.Descriptor{
+		MediaType: imgspecv1.MediaTypeImageConfig,
+		Size:      int64(len(configOCIBytes)),
+		Digest:    digest.FromBytes(configOCIBytes),
+	}
+
+	layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+	for idx := range layers {
+		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+		switch m.m.LayersDescriptors[idx].MediaType {
+		case manifest.DockerV2Schema2ForeignLayerMediaType:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+		case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+		case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+		case manifest.DockerV2Schema2LayerMediaType:
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+		}
+	}
+
+	return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
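+// Note that the conversion may need to upload GzippedEmptyLayer to
+// options.InformationOnly.Destination for history entries marked EmptyLayer
+// (see the PutBlob call below).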
+// This does not change the state of the original manifestSchema2 object. +// +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + dest := options.InformationOnly.Destination + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.LayersDescriptors)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + configBytes, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + imageConfig := &manifest.Schema2Image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. + return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} + + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) + if err != nil { + return nil, errors.Wrap(err, "uploading empty layer") + } + if info.Digest != emptyLayerBlobInfo.Digest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) + } + haveGzippedEmptyLayer = true + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) + } + blobDigest = emptyLayerBlobInfo.Digest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. 
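+		// Roughly: v1ID = hex(sha256(blobDigest.Hex() + " " + parentV1ID)),
+		// mirroring v1IDFromBlobDigestAndComponents below.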
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return m1, nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
+		return nil, err
+	}
+	delete(rawContents, "rootfs")
+	delete(rawContents, "history")
+
+	updates := map[string]interface{}{"id": v1ID}
+	if parentV1ID != "" {
+		updates["parent"] = parentV1ID
+	}
+	if throwaway {
+		updates["throwaway"] = throwaway
+	}
+	for field, value := range updates {
+		encoded, err := json.Marshal(value)
+		if err != nil {
+			return nil, err
+		}
+		rawContents[field] = (*json.RawMessage)(&encoded)
+	}
+	return json.Marshal(rawContents)
+}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go
new file mode 100644
index 00000000000..36d70b5c23d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/manifest.go
@@ -0,0 +1,113 @@
+package image
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// genericManifest is an interface for parsing, modifying image manifests and related data.
+// Note that the public methods are intended to be a subset of types.Image
+// so that embedding a genericManifest into structs works.
+// will support v1 one day...
+type genericManifest interface {
+	serialize() ([]byte, error)
+	manifestMIMEType() string
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() types.BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*imgspecv1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []types.BlobInfo
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*types.ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // This does not change the state of the original Image object. + UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) + // SupportsEncryption returns if encryption is supported for the manifest type + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 + SupportsEncryption(ctx context.Context) bool +} + +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + return manifestSchema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return manifestOCI1FromManifest(src, manblob) + case manifest.DockerV2Schema2MediaType: + return manifestSchema2FromManifest(src, manblob) + case manifest.DockerV2ListMediaType: + return manifestSchema2FromManifestList(ctx, sys, src, manblob) + case imgspecv1.MediaTypeImageIndex: + return manifestOCI1FromImageIndex(ctx, sys, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. +func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { + blobs := make([]types.BlobInfo, len(layers)) + for i, layer := range layers { + blobs[i] = layer.BlobInfo + } + return blobs +} + +// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation +// converted to a specific manifest MIME type. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original genericManifest object. +type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) + +// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if +// required and re-apply the options to the converted type. +// It returns (nil, nil) if no conversion was requested. 
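+// For example (illustrative), requesting imgspecv1.MediaTypeImageManifest for a
+// schema2 image looks up copy.convertToManifestOCI1 in the converters map, runs
+// it against a copy of options, and then re-applies the remaining edits to the
+// converted image.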
+func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { + if options.ManifestMIMEType == "" { + return nil, nil + } + + converter, ok := converters[options.ManifestMIMEType] + if !ok { + return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) + } + + optionsCopy := options + convertedManifest, err := converter(ctx, &optionsCopy) + if err != nil { + return nil, err + } + convertedImage := memoryImageFromManifest(convertedManifest) + + optionsCopy.ManifestMIMEType = "" + return convertedImage.UpdatedImage(ctx, optionsCopy) +} diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go new file mode 100644 index 00000000000..4c96b37d889 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/memory.go @@ -0,0 +1,64 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. +// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. there is no way to get layer blobs +// from a memoryImage. +type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + return -1, nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
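+// For memoryImage this is always (nil, nil): an in-memory image carries only
+// manifest metadata, never layer blobs.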
+func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go
new file mode 100644
index 00000000000..58e9c03ba3a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci.go
@@ -0,0 +1,248 @@
+package image
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/blobinfocache/none"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type manifestOCI1 struct {
+	src        types.ImageSource // May be nil if configBlob is not nil
+	configBlob []byte            // If set, corresponds to contents of m.Config.
+	m          *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+	m, err := manifest.OCI1FromManifest(manifestBlob)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestOCI1{
+		src: src,
+		m:   m,
+	}, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+	return &manifestOCI1{
+		src:        src,
+		configBlob: configBlob,
+		m:          manifest.OCI1FromComponents(config, layers),
+	}
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+	return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+	return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+	return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+	if m.configBlob == nil {
+		if m.src == nil {
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+		}
+		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+		if err != nil {
+			return nil, err
+		}
+		defer stream.Close()
+		blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+		if err != nil {
+			return nil, err
+		}
+		computedDigest := digest.FromBytes(blob)
+		if computedDigest != m.m.Config.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+		}
+		m.configBlob = blob
+	}
+	return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	cb, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(cb, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the combination of CompressionOperation and CompressionAlgorithm specified
+// in one or more options.LayerInfos items indicates that a layer is compressed using
+// an algorithm that is not allowed in OCI.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.OCI1Clone(m.m),
+	}
+
+	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+		manifest.DockerV2Schema2MediaType:       copy.convertToManifestSchema2Generic,
+		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
+		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if converted != nil {
+		return converted, nil
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+	return manifest.Schema2Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+//
+// We need this function just because a function returning an implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface
+func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	return m.convertToManifestSchema2(ctx, options)
+}
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+	// Create a copy of the descriptor.
+	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+
+	// The only difference between OCI and DockerSchema2 is the mediatypes. The
+	// media type of the manifest is handled by manifestSchema2FromComponents.
+	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+	for idx := range layers {
+		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+		switch layers[idx].MediaType {
+		case imgspecv1.MediaTypeImageLayerNonDistributable:
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+		case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+		case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		case imgspecv1.MediaTypeImageLayer:
+			layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+		case imgspecv1.MediaTypeImageLayerGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+		case imgspecv1.MediaTypeImageLayerZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+		}
+	}
+
+	// Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest, since the only difference is the mediatype of
+	// descriptors, there is no change to any blob stored in m.src.
+	return manifestSchema2FromComponents(config, m.src, nil, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
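+// The conversion is transitive (OCI1 to schema2 to schema1), so the schema2
+// restrictions on layer media types apply here as well.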
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	// We can't directly convert to V1, but we can transitively convert via a V2 image
+	m2, err := m.convertToManifestSchema2(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return m2.convertToManifestSchema1(ctx, options)
+}
+
+// SupportsEncryption returns whether encryption is supported for the manifest type
+func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go
new file mode 100644
index 00000000000..4e6ca879aad
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+	index, err := manifest.OCI1IndexFromManifest(manblob)
+	if err != nil {
+		return nil, errors.Wrapf(err, "parsing OCI1 index")
+	}
+	targetManifestDigest, err := index.ChooseInstance(sys)
+	if err != nil {
+		return nil, errors.Wrapf(err, "choosing image instance")
+	}
+	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+	if err != nil {
+		return nil, errors.Wrapf(err, "loading manifest for target platform")
+	}
+
+	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+	if err != nil {
+		return nil, errors.Wrap(err, "computing manifest digest")
+	}
+	if !matches {
+		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+	}
+
+	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
new file mode 100644
index 00000000000..3a016e1d09a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -0,0 +1,104 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+)
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+	types.Image
+	src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() +} + +// sourcedImage is a general set of utilities for working with container images, +// whatever is their underlying location (i.e. dockerImageSource-independent). +// Note the existence of skopeo/docker.Image: some instances of a `types.Image` +// may not be a `sourcedImage` directly. However, most users of `types.Image` +// do not care, and those who care about `skopeo/docker.Image` know they do. +type sourcedImage struct { + *UnparsedImage + manifestBlob []byte + manifestMIMEType string + // genericManifest contains data corresponding to manifestBlob. + // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest + // if you want to preserve the original manifest; use manifestBlob directly. + genericManifest +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &sourcedImage{ + UnparsedImage: unparsed, + manifestBlob: manifestBlob, + manifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *sourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. 
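The ownership contract documented on FromSource above is easy to get wrong, so here is a minimal, purely illustrative sketch of the intended call pattern (not part of the vendored code); `ref` is assumed to be a types.ImageReference obtained from one of the transports:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

// printManifestType sketches the expected usage: on success FromSource takes
// ownership of src, so only the returned ImageCloser needs to be closed.
func printManifestType(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) error {
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return err
	}
	img, err := image.FromSource(ctx, sys, src)
	if err != nil {
		src.Close() // on failure the caller still owns src
		return err
	}
	defer img.Close() // closes the underlying ImageSource as well

	_, mimeType, err := img.Manifest(ctx)
	if err != nil {
		return err
	}
	fmt.Println("manifest MIME type:", mimeType)
	return nil
}
```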
+func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	return i.manifestBlob, i.manifestMIMEType, nil
+}
+
+func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
new file mode 100644
index 00000000000..c64852f722b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -0,0 +1,95 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+type UnparsedImage struct {
+	src            types.ImageSource
+	instanceDigest *digest.Digest
+	cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+	// A private cache for Manifest(), may be the empty string if guessing failed.
+	// Valid iff cachedManifest is not nil.
+	cachedManifestMIMEType string
+	cachedSignatures       [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+	return &UnparsedImage{
+		src:            src,
+		instanceDigest: instanceDigest,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+	return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.cachedManifest == nil {
+		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// ImageSource.GetManifest does not do digest verification, but we do;
+		// this immediately protects also any user of types.Image.
+		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+			matches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "computing manifest digest")
+			}
+			if !matches {
+				return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+			}
+		}
+
+		i.cachedManifest = m
+		i.cachedManifestMIMEType = mt
+	}
+	return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { + if i.cachedSignatures == nil { + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) + if err != nil { + return nil, err + } + i.cachedSignatures = sigs + } + return i.cachedSignatures, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go new file mode 100644 index 00000000000..b86e8b1ac3a --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -0,0 +1,64 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original +// object if it implements BlobInfoCache2, or a wrapper which discards compression information +// if it only implements BlobInfoCache. +func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { + if bic2, ok := bic.(BlobInfoCache2); ok { + return bic2 + } + return &v1OnlyBlobInfoCache{ + BlobInfoCache: bic, + } +} + +type v1OnlyBlobInfoCache struct { + types.BlobInfoCache +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +} + +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { + return nil +} + +// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of +// types.BICReplacementCandidate, dropping compression information. +func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { + candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) + for _, c := range v2candidates { + candidates = append(candidates, types.BICReplacementCandidate{ + Digest: c.Digest, + Location: c.Location, + }) + } + return candidates +} + +// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm +// values suitable for inclusion in a types.BlobInfo structure, based on the name of the +// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by +// TryReusingBlob() implementations to set values in the BlobInfo structure that they return +// upon success. 
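Before the implementation of OperationAndAlgorithmForCompressor below, a hedged sketch of how a caller might use it; the package is internal, so this compiles only inside this module, and "gzip" is just a known algorithm name used for illustration:

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/internal/blobinfocache"
)

// describeCompressor exercises the three cases the helper distinguishes:
// a known algorithm name, the Uncompressed marker, and UnknownCompression.
func describeCompressor(name string) {
	op, algo, err := blobinfocache.OperationAndAlgorithmForCompressor(name)
	switch {
	case err != nil:
		fmt.Printf("%q: unusable (%v)\n", name, err)
	case algo != nil:
		fmt.Printf("%q: operation %v, algorithm %s\n", name, op, algo.Name())
	default:
		fmt.Printf("%q: operation %v, no algorithm\n", name, op)
	}
}
```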
+func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
+	switch compressorName {
+	case Uncompressed:
+		return types.Decompress, nil, nil
+	case UnknownCompression:
+		return types.PreserveOriginal, nil, nil
+	default:
+		algo, err := compression.AlgorithmByName(compressorName)
+		if err == nil {
+			return types.Compress, &algo, nil
+		}
+		return types.PreserveOriginal, nil, err
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
new file mode 100644
index 00000000000..3c2be57f32a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -0,0 +1,45 @@
+package blobinfocache
+
+import (
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+)
+
+const (
+	// Uncompressed is the value we store in a blob info cache to indicate that we know that
+	// the blob in the corresponding location is not compressed.
+	Uncompressed = "uncompressed"
+	// UnknownCompression is the value we store in a blob info cache to indicate that we don't
+	// know if the blob in the corresponding location is compressed (and if so, how) or not.
+	UnknownCompression = "unknown"
+)
+
+// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind
+// of compression was applied to the blobs it keeps information about.
+type BlobInfoCache2 interface {
+	types.BlobInfoCache
+	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+	// or Uncompressed or UnknownCompression.
+	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+	// digest just because some remote author claims so (e.g. because a manifest says so);
+	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+	// information in a manifest.
+	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	// CandidateLocations2 returns a prioritized, limited number of blobs and their locations
+	// that could possibly be reused within the specified (transport scope) (if they still
+	// exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+	// up variants of the blob which have the same uncompressed digest.
+	//
+	// The CompressorName fields in returned data must never be UnknownCompression.
+	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+}
+
+// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
+type BICReplacementCandidate2 struct {
+	Digest         digest.Digest
+	CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
+	Location       types.BICLocationReference
+}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
new file mode 100644
index 00000000000..3fed1995cb8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -0,0 +1,60 @@
+package iolimits
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/pkg/errors"
+)
+
+// All constants below are intended to be used as limits for `ReadAtMost`. The
+// immediate use-case for limiting the size of in-memory copied data is to
+// protect against OOM DOS attacks as described in CVE-2020-1702. Instead of
+// copying data until running out of memory, we error out after hitting the
+// specified limit.
+const (
+	// megaByte denotes one megabyte and is intended to be used as a limit in
+	// `ReadAtMost`.
+	megaByte = 1 << 20
+	// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
+	// of 4 MB aligns with the one of a Docker registry:
+	// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
+	MaxManifestBodySize = 4 * megaByte
+	// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxAuthTokenBodySize = megaByte
+	// MaxSignatureListBodySize is the maximum allowed size of a signature list.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureListBodySize = 4 * megaByte
+	// MaxSignatureBodySize is the maximum allowed size of a signature.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureBodySize = 4 * megaByte
+	// MaxErrorBodySize is the maximum allowed size of an error-response body.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxErrorBodySize = megaByte
+	// MaxConfigBodySize is the maximum allowed size of a config blob.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxConfigBodySize = 4 * megaByte
+	// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxOpenShiftStatusBody = 4 * megaByte
+	// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images).
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxTarFileManifestSize = megaByte
+)
+
+// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
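A short usage sketch of the function whose body follows, pairing it with one of the constants above; illustrative only (the package is internal to this module, and the URL is a placeholder):

```go
package example

import (
	"net/http"

	"github.com/containers/image/v5/internal/iolimits"
)

// fetchManifestBody reads at most MaxManifestBodySize bytes of the response,
// failing instead of allocating without bound if the server sends more.
func fetchManifestBody(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return iolimits.ReadAtMost(resp.Body, iolimits.MaxManifestBodySize)
}
```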
+func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
+	limitedReader := io.LimitReader(reader, int64(limit+1))
+
+	res, err := ioutil.ReadAll(limitedReader)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(res) > limit {
+		return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
new file mode 100644
index 00000000000..3bda6af2918
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -0,0 +1,196 @@
+package platform
+
+// Largely based on
+// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go
+// Copyright 2012-2017 Docker, Inc.
+//
+// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go
+// Copyright The containerd Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// https://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+import (
+	"bufio"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// For Linux, the kernel has already detected the ABI, ISA and Features,
+// so we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+	if runtime.GOOS != "linux" {
+		return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
+	}
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse the cpuinfo output line by line. For an SMP SoC, parsing
+	// the first core is enough.
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern) +} + +func getCPUVariantWindows(arch string) string { + // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch arch { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "" + } + + return variant +} + +func getCPUVariantArm() string { + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + return "" + } + // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286) + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "" + } + + return variant +} + +func getCPUVariant(os string, arch string) string { + if os == "windows" { + return getCPUVariantWindows(arch) + } + if arch == "arm" || arch == "arm64" { + return getCPUVariantArm() + } + return "" +} + +// compatibility contains, for a specified architecture, a list of known variants, in the +// order from most capable (most restrictive) to least capable (most compatible). +// Architectures that don’t have variants should not have an entry here. +var compatibility = map[string][]string{ + "arm": {"v8", "v7", "v6", "v5"}, + "arm64": {"v8"}, +} + +// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, +// the most compatible platform is first. +// If some option (arch, os, variant) is not present, a value from current platform is detected. +func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { + wantedArch := runtime.GOARCH + wantedVariant := "" + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } else { + // Only auto-detect the variant if we are using the default architecture. + // If the user has specified the ArchitectureChoice, don't autodetect, even if + // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH + // value is relevant to the use case, and if we do autodetect a variant, + // ctx.VariantChoice can't be used to override it back to "". + wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH) + } + if ctx != nil && ctx.VariantChoice != "" { + wantedVariant = ctx.VariantChoice + } + + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + var variants []string = nil + if wantedVariant != "" { + // If the user requested a specific variant, we'll walk down + // the list from most to least compatible. 
+ if compatibility[wantedArch] != nil { + variantOrder := compatibility[wantedArch] + for i, v := range variantOrder { + if wantedVariant == v { + variants = variantOrder[i:] + break + } + } + } + if variants == nil { + // user wants a variant which we know nothing about - not even compatibility + variants = []string{wantedVariant} + } + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + } else { + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + // If available add the entire compatibility matrix for the specific architecture. + if possibleVariants, ok := compatibility[wantedArch]; ok { + variants = append(variants, possibleVariants...) + } + } + + res := make([]imgspecv1.Platform, 0, len(variants)) + for _, v := range variants { + res = append(res, imgspecv1.Platform{ + OS: wantedOS, + Architecture: wantedArch, + Variant: v, + }) + } + return res, nil +} + +// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches +// an item from the return value of WantedPlatforms. +func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool { + return image.Architecture == wanted.Architecture && + image.OS == wanted.OS && + image.Variant == wanted.Variant +} diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go new file mode 100644 index 00000000000..b8d3a7e56d7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go @@ -0,0 +1,57 @@ +package putblobdigest + +import ( + "io" + + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// Digester computes a digest of the provided stream, if not known yet. +type Digester struct { + knownDigest digest.Digest // Or "" + digester digest.Digester // Or nil +} + +// newDigester initiates computation of a digest.Canonical digest of stream, +// if !validDigest; otherwise it just records knownDigest to be returned later. +// The caller MUST use the returned stream instead of the original value. +func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { + if validDigest { + return Digester{knownDigest: knownDigest}, stream + } else { + res := Digester{ + digester: digest.Canonical.Digester(), + } + stream = io.TeeReader(stream, res.digester.Hash()) + return res, stream + } +} + +// DigestIfUnknown initiates computation of a digest.Canonical digest of stream, +// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will +// be used (accepting any algorithm). +// The caller MUST use the returned stream instead of the original value. +func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { + d := blobInfo.Digest + return newDigester(stream, d, d != "") +} + +// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, +// if a digest.Canonical digest is not supplied in the provided blobInfo; +// otherwise blobInfo.Digest will be used. +// The caller MUST use the returned stream instead of the original value. +func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { + d := blobInfo.Digest + return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical) +} + +// Digest() returns a digest value possibly computed by Digester. 
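With WantedPlatforms and MatchesPlatform above in place, instance selection over a manifest list reduces to a nested loop; a hedged sketch under the assumption that the candidate platforms come from an OCI index or Docker manifest list (internal package, so usable only within this module):

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/internal/pkg/platform"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// pickInstance returns the first candidate matching the wanted-platform list,
// which WantedPlatforms orders from most to least compatible.
func pickInstance(sys *types.SystemContext, candidates []imgspecv1.Platform) (imgspecv1.Platform, error) {
	wanted, err := platform.WantedPlatforms(sys)
	if err != nil {
		return imgspecv1.Platform{}, err
	}
	for _, w := range wanted {
		for _, c := range candidates {
			if platform.MatchesPlatform(c, w) {
				return c, nil
			}
		}
	}
	return imgspecv1.Platform{}, fmt.Errorf("no candidate matches the wanted platforms")
}
```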
+// This must be called only after all of the stream returned by a Digester constructor +// has been successfully read. +func (d Digester) Digest() digest.Digest { + if d.digester != nil { + return d.digester.Digest() + } + return d.knownDigest +} diff --git a/vendor/github.com/containers/image/v5/internal/rootless/rootless.go b/vendor/github.com/containers/image/v5/internal/rootless/rootless.go new file mode 100644 index 00000000000..80623bfbc13 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/rootless/rootless.go @@ -0,0 +1,25 @@ +package rootless + +import ( + "os" + "strconv" +) + +// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any) +// +// Podman and similar software, in “rootless” configuration, when run as a non-root +// user, very early switches to a user namespace, where Geteuid() == 0 (but does not +// switch to a limited mount namespace); so, code relying on Geteuid() would use +// system-wide paths in e.g. /var, when the user is actually not privileged to write to +// them, and expects state to be stored in the home directory. +// +// If Podman is setting up such a user namespace, it records the original UID in an +// environment variable, allowing us to make choices based on the actual user’s identity. +func GetRootlessEUID() int { + euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if euidEnv != "" { + euid, _ := strconv.Atoi(euidEnv) + return euid + } + return os.Geteuid() +} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go new file mode 100644 index 00000000000..306220585b6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -0,0 +1,41 @@ +package streamdigest + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/types" +) + +// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo. +// The temporary file is returned as an io.Reader along with a cleanup function. +// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. +// If an error occurs, inputInfo is not modified. 
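A usage sketch for ComputeBlobInfo, whose body follows; it illustrates the cleanup contract and that inputInfo is only filled in after the stream has been fully buffered. A nil SystemContext is assumed to be acceptable here, making the helper fall back to the default big-files directory:

```go
package example

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/internal/streamdigest"
	"github.com/containers/image/v5/types"
)

// digestUnknownBlob buffers a stream of unknown size to disk so that both
// Digest and Size are known before an upload begins.
func digestUnknownBlob() error {
	info := types.BlobInfo{Size: -1}
	stream, cleanup, err := streamdigest.ComputeBlobInfo(nil, strings.NewReader("layer data"), &info)
	if err != nil {
		return err
	}
	defer cleanup() // closes and removes the temporary file

	fmt.Println("digest:", info.Digest, "size:", info.Size)
	_ = stream // would be handed to PutBlob in real code
	return nil
}
```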
+func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
+	diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
+	if err != nil {
+		return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
+	}
+	cleanup := func() {
+		diskBlob.Close()
+		os.Remove(diskBlob.Name())
+	}
+	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
+	written, err := io.Copy(diskBlob, stream)
+	if err != nil {
+		cleanup()
+		return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
+	}
+	_, err = diskBlob.Seek(0, io.SeekStart)
+	if err != nil {
+		cleanup()
+		return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
+	}
+	inputInfo.Digest = digester.Digest()
+	inputInfo.Size = written
+	return diskBlob, cleanup, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
new file mode 100644
index 00000000000..809446e1897
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
@@ -0,0 +1,34 @@
+package tmpdir
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/containers/image/v5/types"
+)
+
+// unixTempDirForBigFiles is the directory path to store big files on non-Windows systems.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path'
+var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
+
+// builtinUnixTempDirForBigFiles is the directory path to store big files.
+// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+// DO NOT change this, instead see unixTempDirForBigFiles above.
+const builtinUnixTempDirForBigFiles = "/var/tmp"
+
+// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp,
+// which on systemd-based systems could be the unsuitable tmpfs filesystem.
+func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
+	if sys != nil && sys.BigFilesTemporaryDir != "" {
+		return sys.BigFilesTemporaryDir
+	}
+	var temporaryDirectoryForBigFiles string
+	if runtime.GOOS == "windows" {
+		temporaryDirectoryForBigFiles = os.TempDir()
+	} else {
+		temporaryDirectoryForBigFiles = unixTempDirForBigFiles
+	}
+	return temporaryDirectoryForBigFiles
+}
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
new file mode 100644
index 00000000000..388f8cf3b49
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/types/types.go
@@ -0,0 +1,91 @@
+package types
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/docker/reference"
+	publicTypes "github.com/containers/image/v5/types"
+)
+
+// ImageDestinationWithOptions is an internal extension to the ImageDestination
+// interface.
+type ImageDestinationWithOptions interface {
+	publicTypes.ImageDestination
+
+	// PutBlobWithOptions is a wrapper around PutBlob. If
+	// options.LayerIndex is set, the blob will be committed directly.
+	// Either by the calling goroutine or by another goroutine already
+	// committing layers.
+	//
+	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
+	// *must* be used together. 
Mixing the two with non "WithOptions"
+	// functions is not supported.
+	PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
+
+	// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
+	// options.LayerIndex is set, the reused blob will be recorded as
+	// already pulled.
+	//
+	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
+	// *must* be used together. Mixing the two with non "WithOptions"
+	// functions is not supported.
+	TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+	// Cache to look up blob infos.
+	Cache publicTypes.BlobInfoCache
+	// Denotes whether the blob is a config or not.
+	IsConfig bool
+	// Indicates an empty layer.
+	EmptyLayer bool
+	// The corresponding index in the layer slice.
+	LayerIndex *int
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+	// Cache to look up blob infos.
+	Cache publicTypes.BlobInfoCache
+	// Use an equivalent of the desired blob.
+	CanSubstitute bool
+	// Indicates an empty layer.
+	EmptyLayer bool
+	// The corresponding index in the layer slice.
+	LayerIndex *int
+	// The reference of the image that contains the target blob.
+	SrcRef reference.Named
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+	Offset uint64
+	Length uint64
+}
+
+// ImageSourceSeekable is an image source that permits fetching chunks of the entire blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceSeekable interface {
+	// GetBlobAt returns a stream for the specified blob.
+	// The specified chunks must not overlap and must be sorted by their offset.
+	GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// ImageDestinationPartial is a service to store a blob by requesting the missing chunks from an ImageSourceSeekable.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageDestinationPartial interface {
+	// PutBlobPartial writes contents of stream and returns data representing the result.
+	PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error)
+}
+
+// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+	Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+	return e.Status
+}
diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
new file mode 100644
index 00000000000..6aa9ead68f7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
@@ -0,0 +1,61 @@
+package uploadreader
+
+import (
+	"io"
+	"sync"
+)
+
+// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http
+// package (http.NewRequest, http.Post and the like).
+//
+// The net/http package uses a separate goroutine to upload data to an HTTP connection,
+// and it is possible for the server to return a response (typically an error) before consuming
+// the full body of the request. In that case http.Client.Do can return with an error while
+// the body is still being read, regardless of the cancellation, if any, of http.Request.Context().
+//
+// As a result, any data used/updated by the io.Reader() provided as the request body may be
+// used/updated even after http.Client.Do returns, causing races.
+//
+// To fix this, UploadReader provides a synchronized Terminate() method, which can block for
+// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that
+// after Terminate() returns, the underlying reader is never used any more (unlike calling
+// the cancellation callback of context.WithCancel, which returns before any recipients may have
+// reacted to the cancellation).
+type UploadReader struct {
+	mutex sync.Mutex
+	// The following members can only be used with mutex held
+	reader           io.Reader
+	terminationError error // nil if not terminated yet
+}
+
+// NewUploadReader returns an UploadReader for an "underlying" reader.
+func NewUploadReader(underlying io.Reader) *UploadReader {
+	return &UploadReader{
+		reader:           underlying,
+		terminationError: nil,
+	}
+}
+
+// Read returns the error set by Terminate, if any, or calls the underlying reader.
+// It is safe to call this from a different goroutine than Terminate.
+func (ur *UploadReader) Read(p []byte) (int, error) {
+	ur.mutex.Lock()
+	defer ur.mutex.Unlock()
+
+	if ur.terminationError != nil {
+		return 0, ur.terminationError
+	}
+	return ur.reader.Read(p)
+}
+
+// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after
+// this function returns, any Read calls will fail with the provided error, and the underlying
+// reader will never be used any more.
+//
+// It is safe to call this from a different goroutine than Read.
+func (ur *UploadReader) Terminate(err error) {
+	ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress
+	defer ur.mutex.Unlock()
+
+	ur.terminationError = err
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
new file mode 100644
index 00000000000..20955ab7ff4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -0,0 +1,211 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+// dupStringSlice returns a deep copy of a slice of strings, or nil if the
+// source slice is empty.
+func dupStringSlice(list []string) []string {
+	if len(list) == 0 {
+		return nil
+	}
+	dup := make([]string, len(list))
+	copy(dup, list)
+	return dup
+}
+
+// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
+// passed-in map is nil or has no keys.
+func dupStringStringMap(m map[string]string) map[string]string {
+	if len(m) == 0 {
+		return nil
+	}
+	result := make(map[string]string)
+	for k, v := range m {
+		result[k] = v
+	}
+	return result
+}
+
+// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
+// can expect to be present.
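Closing out upload_reader.go above, a sketch of the termination contract it documents; illustrative only, and compilable only within this module since the package is internal:

```go
package example

import (
	"errors"
	"io"
	"strings"

	"github.com/containers/image/v5/internal/uploadreader"
)

// abortUpload demonstrates the guarantee Terminate makes: once it returns,
// subsequent Reads fail with the provided error and the wrapped reader is
// never touched again.
func abortUpload() error {
	body := uploadreader.NewUploadReader(strings.NewReader("request body"))

	// In real use, body would be the request body of an in-flight upload,
	// and Terminate would be called once an early server error is seen.
	body.Terminate(errors.New("upload aborted"))

	_, err := io.Copy(io.Discard, body)
	return err // "upload aborted"
}
```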
+type allowedManifestFields int + +const ( + allowedFieldConfig allowedManifestFields = 1 << iota + allowedFieldFSLayers + allowedFieldHistory + allowedFieldLayers + allowedFieldManifests + allowedFieldFirstUnusedBit // Keep this at the end! +) + +// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than +// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields +// other than the ones the caller specifically allows. +// expectedMIMEType is used only for diagnostics. +// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous +// data that just isn’t what was expected, as opposed to actually ambiguous data. +func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, + allowed allowedManifestFields) error { + if allowed >= allowedFieldFirstUnusedBit { + return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) + } + // Use a private type to decode, not just a map[string]interface{}, because we want + // to also reject case-insensitive matches (which would be used by Go when really decoding + // the manifest). + // (It is expected that as manifest formats are added or extended over time, more fields will be added + // here.) + detectedFields := struct { + Config interface{} `json:"config"` + FSLayers interface{} `json:"fsLayers"` + History interface{} `json:"history"` + Layers interface{} `json:"layers"` + Manifests interface{} `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &detectedFields); err != nil { + // The caller was supposed to already validate version numbers, so this should not happen; + // let’s not bother with making this error “nice”. + return err + } + unexpected := []string{} + // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. + if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 { + unexpected = append(unexpected, "config") + } + if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 { + unexpected = append(unexpected, "fsLayers") + } + if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 { + unexpected = append(unexpected, "history") + } + if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 { + unexpected = append(unexpected, "layers") + } + if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 { + unexpected = append(unexpected, "manifests") + } + if len(unexpected) != 0 { + return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, + unexpected, expectedMIMEType) + } + return nil +} + +// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func layerInfosToStrings(infos []LayerInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} + +// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed +// versions of “the same kind of content”. +// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed; +// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported". 
+type compressionMIMETypeSet map[string]string + +const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant +const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported” + +// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil +// to mean "no compression"), based on variantTable. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but it can't be combined with this +// algorithm to produce an updated MIME type that complies with the standard that defines mimeType. +// If the compression algorithm is unrecognized, or mimeType is not known to have variants that +// differ from it only in what type of compression has been applied, the returned error will not be +// a ManifestLayerCompressionIncompatibilityError. +func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) { + if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries + return "", fmt.Errorf("cannot update unknown MIME type") + } + for _, variants := range variantTable { + for _, mt := range variants { + if mt == mimeType { // Found the variant + name := mtsUncompressed + if algorithm != nil { + name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + } + if res, ok := variants[name]; ok { + if res != mtsUnsupportedMIMEType { + return res, nil + } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)} + } + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} + } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)} + } + // We can't very well say “the idea of no compression is unknown” + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} + } + } + } + if algorithm != nil { + return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType) + } + return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType) +} + +// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to +// mimeType, based on variantTable. It may use updated.Digest for error messages. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but applying updated.CompressionOperation +// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the +// standard that defines mimeType. +func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { + // Note that manifests in containers-storage might be reporting the + // wrong media type since the original manifests are stored while layers + // are decompressed in storage. Hence, we need to consider the case + // that an already {de}compressed layer should be {de}compressed; + // compressionVariantMIMEType does that by not caring whether the original is + // {de}compressed. 
+ switch updated.CompressionOperation { + case types.PreserveOriginal: + // Force a change to the media type if we're being told to use a particular compressor, + // since it might be different from the one associated with the media type. Otherwise, + // try to keep the original media type. + if updated.CompressionAlgorithm != nil { + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + } + // Keep the original media type. + return mimeType, nil + + case types.Decompress: + return compressionVariantMIMEType(variantTable, mimeType, nil) + + case types.Compress: + if updated.CompressionAlgorithm == nil { + logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest) + return mimeType, nil + } + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + + default: + return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation) + } +} + +// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm +// could not be applied to a layer MIME type. A caller that receives this should either retry +// the call with a different compression algorithm, or attempt to use a different manifest type. +type ManifestLayerCompressionIncompatibilityError struct { + text string +} + +func (m ManifestLayerCompressionIncompatibilityError) Error() string { + return m.text +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go new file mode 100644 index 00000000000..6d12c4cec1f --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -0,0 +1,320 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/docker/docker/api/types/versions" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! + ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) + SchemaVersion int `json:"schemaVersion"` +} + +type schema1V1CompatibilityContainerConfig struct { + Cmd []string +} + +// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. 
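The retry contract attached to ManifestLayerCompressionIncompatibilityError above is worth a sketch: callers that edit layer compression are expected to detect this error type and fall back rather than fail outright. A hedged illustration; `img` is assumed to be a types.Image obtained elsewhere (e.g. via FromUnparsedImage), and switching everything to zstd is just an example edit:

```go
package example

import (
	"context"
	"errors"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

// recompressOrKeep tries to switch all layers to zstd and keeps the image
// unchanged when the manifest format cannot express that compression
// (e.g. plain Docker schema2, per convertToManifestSchema2 above).
func recompressOrKeep(ctx context.Context, img types.Image) (types.Image, error) {
	infos := img.LayerInfos()
	for i := range infos {
		infos[i].CompressionOperation = types.Compress
		infos[i].CompressionAlgorithm = &compression.Zstd
	}
	updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{LayerInfos: infos})
	var incompat manifest.ManifestLayerCompressionIncompatibilityError
	if errors.As(err, &incompat) {
		return img, nil // fall back to the original layers
	}
	return updated, err
}
```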
+type Schema1V1Compatibility struct {
+	ID              string                                `json:"id"`
+	Parent          string                                `json:"parent,omitempty"`
+	Comment         string                                `json:"comment,omitempty"`
+	Created         time.Time                             `json:"created"`
+	ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+	Author          string                                `json:"author,omitempty"`
+	ThrowAway       bool                                  `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob,
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+	s1 := Schema1{}
+	if err := json.Unmarshal(manifest, &s1); err != nil {
+		return nil, err
+	}
+	if s1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+	}
+	if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
+		allowedFieldFSLayers|allowedFieldHistory); err != nil {
+		return nil, err
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	if err := s1.fixManifestLayers(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	s1 := Schema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+	copy := *src
+	return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+	if len(m.FSLayers) != len(m.History) {
+		return errors.New("length of history not equal to number of layers")
+	}
+	if len(m.FSLayers) == 0 {
+		return errors.New("no FSLayers in manifest")
+	}
+	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+	for i, h := range m.History {
+		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+			return errors.Wrapf(err, "parsing v2s1 history entry %d", i)
+		}
+	}
+	return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+	return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo { + layers := make([]LayerInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, + EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, + } + } + return layers +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. + if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) + for i, info := range layerInfos { + // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { + return err + } + } + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range m.ExtractedV1Compatibility { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. 
remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Os: s1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + i.Env = s1.Config.Env + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, errors.Wrapf(err, "decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, errors.Wrapf(err, "re-encoding compat image config %#v", s1) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "re-decoding compat image config %#v", s1) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. 
+ rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2Config(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go new file mode 100644 index 00000000000..1f4db54eed0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -0,0 +1,297 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "time" + + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/pkg/strslice" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. +func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + MediaType: desc.MediaType, + } +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. +type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. 
+    Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+    // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+    // Zero means inherit.
+    Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+    Hostname string // Hostname
+    Domainname string // Domainname
+    User string // User that will run the command(s) inside the container; also supports user:group
+    AttachStdin bool // Attach the standard input, making user interaction possible
+    AttachStdout bool // Attach the standard output
+    AttachStderr bool // Attach the standard error
+    ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
+    Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+    OpenStdin bool // Open stdin
+    StdinOnce bool // If true, close stdin after the first attached client disconnects.
+    Env []string // List of environment variables to set in the container
+    Cmd strslice.StrSlice // Command to run when starting the container
+    Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check that the container is healthy
+    ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+    Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+    Volumes map[string]struct{} // List of volumes (mounts) used for the container
+    WorkingDir string // Current directory (PWD) in which the command will be launched
+    Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+    NetworkDisabled bool `json:",omitempty"` // Is network disabled
+    MacAddress string `json:",omitempty"` // Mac Address of the container
+    OnBuild []string // ONBUILD metadata that was defined in the image's Dockerfile
+    Labels map[string]string // List of labels set on this container
+    StopSignal string `json:",omitempty"` // Signal to stop a container
+    StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+    Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
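Aside (not part of the patch): a hypothetical sketch of how the HEALTHCHECK conventions documented above are expressed with these types; the command and image name are invented.

package main

import (
    "fmt"
    "time"

    "github.com/containers/image/v5/manifest"
)

func main() {
    hc := manifest.Schema2HealthConfig{
        // {"CMD-SHELL", command} runs the command with the system's default shell.
        Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
        Interval: 30 * time.Second, // serialized as integer nanoseconds
        Timeout:  5 * time.Second,
        Retries:  3, // three consecutive failures mark the container unhealthy
    }
    cfg := manifest.Schema2Config{
        Image:       "docker.io/library/nginx:latest",
        Healthcheck: &hc,
    }
    fmt.Println(cfg.Image, cfg.Healthcheck.Test)
}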
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+    // ID is a unique 64 character identifier of the image
+    ID string `json:"id,omitempty"`
+    // Parent is the ID of the parent image
+    Parent string `json:"parent,omitempty"`
+    // Comment is the commit message that was set when committing the image
+    Comment string `json:"comment,omitempty"`
+    // Created is the timestamp at which the image was created
+    Created time.Time `json:"created"`
+    // Container is the id of the container used to commit
+    Container string `json:"container,omitempty"`
+    // ContainerConfig is the configuration of the container that is committed into the image
+    ContainerConfig Schema2Config `json:"container_config,omitempty"`
+    // DockerVersion specifies the version of Docker that was used to build the image
+    DockerVersion string `json:"docker_version,omitempty"`
+    // Author is the name of the author that was specified when committing the image
+    Author string `json:"author,omitempty"`
+    // Config is the configuration of the container received from the client
+    Config *Schema2Config `json:"config,omitempty"`
+    // Architecture is the hardware that the image is built and runs on
+    Architecture string `json:"architecture,omitempty"`
+    // Variant is a variant of the CPU that the image is built and runs on
+    Variant string `json:"variant,omitempty"`
+    // OS is the operating system used to build and run the image
+    OS string `json:"os,omitempty"`
+    // Size is the total size of the image including all layers it is composed of
+    Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+    Type string `json:"type"`
+    DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+    // Created is the timestamp at which the image was created
+    Created time.Time `json:"created"`
+    // Author is the name of the author that was specified when committing the image
+    Author string `json:"author,omitempty"`
+    // CreatedBy keeps the Dockerfile command used while building the image
+    CreatedBy string `json:"created_by,omitempty"`
+    // Comment is the commit message that was set when committing the image
+    Comment string `json:"comment,omitempty"`
+    // EmptyLayer is set to true if this history item did not generate a
+    // layer. Otherwise, the history item is associated with the next
+    // layer in the RootFS section.
+    EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+    Schema2V1Image
+    Parent digest.Digest `json:"parent,omitempty"`
+    RootFS *Schema2RootFS `json:"rootfs,omitempty"`
+    History []Schema2History `json:"history,omitempty"`
+    OSVersion string `json:"os.version,omitempty"`
+    OSFeatures []string `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+    s2 := Schema2{}
+    if err := json.Unmarshal(manifest, &s2); err != nil {
+        return nil, err
+    }
+    if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
+        allowedFieldConfig|allowedFieldLayers); err != nil {
+        return nil, err
+    }
+    // Check manifest's and layers' media types.
+    if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+        return nil, err
+    }
+    for _, layer := range s2.LayersDescriptors {
+        if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+            return nil, err
+        }
+    }
+    return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+    return &Schema2{
+        SchemaVersion: 2,
+        MediaType: DockerV2Schema2MediaType,
+        ConfigDescriptor: config,
+        LayersDescriptors: layers,
+    }
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+    copy := *src
+    return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+    return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+    blobs := []LayerInfo{}
+    for _, layer := range m.LayersDescriptors {
+        blobs = append(blobs, LayerInfo{
+            BlobInfo: BlobInfoFromSchema2Descriptor(layer),
+            EmptyLayer: false,
+        })
+    }
+    return blobs
+}
+
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+    {
+        mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+        compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+        compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+    },
+    {
+        mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+        compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+        compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+    },
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that would result in anything other than gzip compression.
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+    if len(m.LayersDescriptors) != len(layerInfos) {
+        return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+    }
+    original := m.LayersDescriptors
+    m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+    for i, info := range layerInfos {
+        mimeType := original[i].MediaType
+        // First make sure we support the media type of the original layer.
+        if err := SupportedSchema2MediaType(mimeType); err != nil {
+            return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType)
+        }
+        mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
+        if err != nil {
+            return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+        }
+        m.LayersDescriptors[i].MediaType = mimeType
+        m.LayersDescriptors[i].Digest = info.Digest
+        m.LayersDescriptors[i].Size = info.Size
+        m.LayersDescriptors[i].URLs = info.URLs
+    }
+    return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: &s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Variant: s2.Variant, + Os: s2.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + i.Env = s2.Config.Env + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go new file mode 100644 index 00000000000..e97dfbd887c --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -0,0 +1,221 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + platform "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +type Schema2PlatformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// Schema2ManifestDescriptor references a platform-specific manifest. +type Schema2ManifestDescriptor struct { + Schema2Descriptor + Platform Schema2PlatformSpec `json:"platform"` +} + +// Schema2List is a list of platform-specific manifests. +type Schema2List struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []Schema2ManifestDescriptor `json:"manifests"` +} + +// MIMEType returns the MIME type of this particular manifest list. +func (list *Schema2List) MIMEType() string { + return list.MediaType +} + +// Instances returns a slice of digests of the manifests that this list knows of. +func (list *Schema2List) Instances() []digest.Digest { + results := make([]digest.Digest, len(list.Manifests)) + for i, m := range list.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the list. 
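Aside (not part of the patch): a hypothetical sketch combining Schema2FromComponents, Serialize, and ImageID from above; the config and layer blobs are fabricated stand-ins.

package main

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
    "github.com/opencontainers/go-digest"
)

func main() {
    cfgBlob := []byte(`{"architecture":"amd64","os":"linux"}`) // stand-in config blob
    layerBlob := []byte("stand-in layer data")
    m := manifest.Schema2FromComponents(
        manifest.Schema2Descriptor{
            MediaType: manifest.DockerV2Schema2ConfigMediaType,
            Size:      int64(len(cfgBlob)),
            Digest:    digest.FromBytes(cfgBlob),
        },
        []manifest.Schema2Descriptor{{
            MediaType: manifest.DockerV2Schema2LayerMediaType,
            Size:      int64(len(layerBlob)),
            Digest:    digest.FromBytes(layerBlob),
        }},
    )
    blob, err := m.Serialize()
    if err != nil {
        panic(err)
    }
    // For schema 2, ImageID is the hex of the config digest; the diffIDs argument is unused.
    id, err := m.ImageID(nil)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d-byte manifest, image ID %s\n", len(blob), id)
}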
+func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+    for _, manifest := range list.Manifests {
+        if manifest.Digest == instanceDigest {
+            return ListUpdate{
+                Digest: manifest.Digest,
+                Size: manifest.Size,
+                MediaType: manifest.MediaType,
+            }, nil
+        }
+    }
+    return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
+    if len(updates) != len(list.Manifests) {
+        return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
+    }
+    for i := range updates {
+        if err := updates[i].Digest.Validate(); err != nil {
+            return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates))
+        }
+        list.Manifests[i].Digest = updates[i].Digest
+        if updates[i].Size < 0 {
+            return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+        }
+        list.Manifests[i].Size = updates[i].Size
+        if updates[i].MediaType == "" {
+            return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
+        }
+        list.Manifests[i].MediaType = updates[i].MediaType
+    }
+    return nil
+}
+
+// ChooseInstance returns the digest of the manifest in the list which is
+// appropriate for the current environment.
+func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+    wantedPlatforms, err := platform.WantedPlatforms(ctx)
+    if err != nil {
+        return "", errors.Wrapf(err, "getting platform information %#v", ctx)
+    }
+    for _, wantedPlatform := range wantedPlatforms {
+        for _, d := range list.Manifests {
+            imagePlatform := imgspecv1.Platform{
+                Architecture: d.Platform.Architecture,
+                OS: d.Platform.OS,
+                OSVersion: d.Platform.OSVersion,
+                OSFeatures: dupStringSlice(d.Platform.OSFeatures),
+                Variant: d.Platform.Variant,
+            }
+            if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
+                return d.Digest, nil
+            }
+        }
+    }
+    return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
+
+// Serialize returns the list in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (list *Schema2List) Serialize() ([]byte, error) {
+    buf, err := json.Marshal(list)
+    if err != nil {
+        return nil, errors.Wrapf(err, "marshaling Schema2List %#v", list)
+    }
+    return buf, nil
+}
+
+// Schema2ListFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
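Aside (not part of the patch): a hypothetical sketch of ChooseInstance, building a one-entry list with Schema2ListFromComponents (defined just below); OSChoice and ArchitectureChoice are existing types.SystemContext fields that override the runtime defaults, and the digest is fabricated.

package main

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
)

func main() {
    list := manifest.Schema2ListFromComponents([]manifest.Schema2ManifestDescriptor{{
        Schema2Descriptor: manifest.Schema2Descriptor{
            MediaType: manifest.DockerV2Schema2MediaType,
            Size:      1234, // fabricated
            Digest:    digest.FromString("fabricated manifest"),
        },
        Platform: manifest.Schema2PlatformSpec{OS: "linux", Architecture: "amd64"},
    }})
    // Ask for a specific platform instead of matching the host's defaults.
    dig, err := list.ChooseInstance(&types.SystemContext{
        OSChoice:           "linux",
        ArchitectureChoice: "amd64",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("chose", dig)
}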
+func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { + list := Schema2List{ + SchemaVersion: 2, + MediaType: DockerV2ListMediaType, + Manifests: make([]Schema2ManifestDescriptor, len(components)), + } + for i, component := range components { + m := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: dupStringSlice(component.URLs), + }, + Schema2PlatformSpec{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: dupStringSlice(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + Features: dupStringSlice(component.Platform.Features), + }, + } + list.Manifests[i] = m + } + return &list +} + +// Schema2ListClone creates a deep copy of the passed-in list. +func Schema2ListClone(list *Schema2List) *Schema2List { + return Schema2ListFromComponents(list.Manifests) +} + +// ToOCI1Index returns the list encoded as an OCI1 index. +func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) { + components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) + for _, manifest := range list.Manifests { + converted := imgspecv1.Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: dupStringSlice(manifest.URLs), + Platform: &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSFeatures: dupStringSlice(manifest.Platform.OSFeatures), + OSVersion: manifest.Platform.OSVersion, + Variant: manifest.Platform.Variant, + }, + } + components = append(components, converted) + } + oci := OCI1IndexFromComponents(components, nil) + return oci, nil +} + +// ToSchema2List returns the list encoded as a Schema2 list. +func (list *Schema2List) ToSchema2List() (*Schema2List, error) { + return Schema2ListClone(list), nil +} + +// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { + list := Schema2List{ + Manifests: []Schema2ManifestDescriptor{}, + } + if err := json.Unmarshal(manifest, &list); err != nil { + return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest)) + } + if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, + allowedFieldManifests); err != nil { + return nil, err + } + return &list, nil +} + +// Clone returns a deep copy of this list and its contents. +func (list *Schema2List) Clone() List { + return Schema2ListClone(list) +} + +// ConvertToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. +func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return list.Clone(), nil + case imgspecv1.MediaTypeImageIndex: + return list.ToOCI1Index() + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
+        return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
+    }
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
new file mode 100644
index 00000000000..58982597e6e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -0,0 +1,80 @@
+package manifest
+
+import (
+    "fmt"
+
+    "github.com/containers/image/v5/types"
+    digest "github.com/opencontainers/go-digest"
+    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+    // SupportedListMIMETypes is a list of the manifest list types that we know how to
+    // read/manipulate/write.
+    SupportedListMIMETypes = []string{
+        DockerV2ListMediaType,
+        imgspecv1.MediaTypeImageIndex,
+    }
+)
+
+// List is an interface for parsing and modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List interface {
+    // MIMEType returns the MIME type of this particular manifest list.
+    MIMEType() string
+
+    // Instances returns a list of the manifests that this list knows of, other than its own.
+    Instances() []digest.Digest
+
+    // Update information about the list's instances. The length of the passed-in slice must
+    // match the length of the list of instances which the list already contains, and every field
+    // must be specified.
+    UpdateInstances([]ListUpdate) error
+
+    // Instance returns the size and MIME type of a particular instance in the list.
+    Instance(digest.Digest) (ListUpdate, error)
+
+    // ChooseInstance selects which manifest is most appropriate for the platform described by the
+    // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
+    ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
+
+    // Serialize returns the list in a blob format.
+    // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
+    // from one, even if no modifications were made!
+    Serialize() ([]byte, error)
+
+    // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
+    ConvertToMIMEType(mimeType string) (List, error)
+
+    // Clone returns a deep copy of this list and its contents.
+    Clone() List
+}
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+type ListUpdate struct {
+    Digest digest.Digest
+    Size int64
+    MediaType string
+}
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
+    normalized := NormalizedMIMEType(manifestMIMEType)
+    switch normalized {
+    case DockerV2ListMediaType:
+        return Schema2ListFromManifest(manifest)
+    case imgspecv1.MediaTypeImageIndex:
+        return OCI1IndexFromManifest(manifest)
+    case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+        return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
+    }
+    return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+}
+
+// ConvertListToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
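Aside (not part of the patch): a hypothetical sketch of format-agnostic handling through the List interface above; GuessMIMEType is defined in manifest.go later in this patch, and the input path is a placeholder.

package main

import (
    "fmt"
    "os"

    "github.com/containers/image/v5/manifest"
)

func main() {
    blob, err := os.ReadFile("index.json") // hypothetical path to a manifest list or OCI index
    if err != nil {
        panic(err)
    }
    list, err := manifest.ListFromBlob(blob, manifest.GuessMIMEType(blob))
    if err != nil {
        panic(err)
    }
    fmt.Println("MIME type:", list.MIMEType())
    for _, d := range list.Instances() {
        u, err := list.Instance(d)
        if err != nil {
            panic(err)
        }
        fmt.Printf("  %s (%s, %d bytes)\n", d, u.MediaType, u.Size)
    }
}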
+func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
+    return list.ConvertToMIMEType(manifestMIMEType)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
new file mode 100644
index 00000000000..2e3e5da15ea
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -0,0 +1,270 @@
+package manifest
+
+import (
+    "encoding/json"
+    "fmt"
+
+    "github.com/containers/image/v5/types"
+    "github.com/containers/libtrust"
+    digest "github.com/opencontainers/go-digest"
+    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+    // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+    DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+    // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+    DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+    // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+    DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+    // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+    DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+    // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+    DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+    // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+    DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+    // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+    DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+    // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+    DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+    // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+    DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
+func SupportedSchema2MediaType(m string) error {
+    switch m {
+    case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed:
+        return nil
+    default:
+        return fmt.Errorf("unsupported docker v2s2 media type: %q", m)
+    }
+}
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+    imgspecv1.MediaTypeImageManifest,
+    DockerV2Schema2MediaType,
+    DockerV2Schema1SignedMediaType,
+    DockerV2Schema1MediaType,
+    DockerV2ListMediaType,
+    imgspecv1.MediaTypeImageIndex,
+}
+
+// Manifest is an interface for parsing and modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []LayerInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) +} + +// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. +type LayerInfo struct { + types.BlobInfo + EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. +} + +// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. +// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, +// but we may not have such metadata available (e.g. when the manifest is a local file). +func GuessMIMEType(manifest []byte) string { + // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. + // Also docker/distribution/manifest.Versioned. + meta := struct { + MediaType string `json:"mediaType"` + SchemaVersion int `json:"schemaVersion"` + Signatures interface{} `json:"signatures"` + }{} + if err := json.Unmarshal(manifest, &meta); err != nil { + return "" + } + + switch meta.MediaType { + case DockerV2Schema2MediaType, DockerV2ListMediaType, + imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. 
+        return meta.MediaType
+    }
+    // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
+    switch meta.SchemaVersion {
+    case 1:
+        if meta.Signatures != nil {
+            return DockerV2Schema1SignedMediaType
+        }
+        return DockerV2Schema1MediaType
+    case 2:
+        // Best effort to understand if this is an OCI image since mediaType
+        // wasn't in the manifest for OCI image-spec < 1.0.2.
+        // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+        ociMan := struct {
+            Config struct {
+                MediaType string `json:"mediaType"`
+            } `json:"config"`
+        }{}
+        if err := json.Unmarshal(manifest, &ociMan); err != nil {
+            return ""
+        }
+        switch ociMan.Config.MediaType {
+        case imgspecv1.MediaTypeImageConfig:
+            return imgspecv1.MediaTypeImageManifest
+        case DockerV2Schema2ConfigMediaType:
+            // This case should not happen since a Docker image
+            // must declare a top-level media type and
+            // `meta.MediaType` has already been checked.
+            return DockerV2Schema2MediaType
+        }
+        // Maybe an image index or an OCI artifact.
+        ociIndex := struct {
+            Manifests []imgspecv1.Descriptor `json:"manifests"`
+        }{}
+        if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+            return ""
+        }
+        if len(ociIndex.Manifests) != 0 {
+            if ociMan.Config.MediaType == "" {
+                return imgspecv1.MediaTypeImageIndex
+            }
+            // FIXME: this is mixing media types of manifests and configs.
+            return ociMan.Config.MediaType
+        }
+        // It's most likely an OCI artifact with a custom config media
+        // type which is not (and cannot be) covered by the media-type
+        // checks above.
+        return imgspecv1.MediaTypeImageManifest
+    }
+    return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+    if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+        sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+        if err != nil {
+            return "", err
+        }
+        manifest, err = sig.Payload()
+        if err != nil {
+            // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+            // that libtrust itself has josebase64UrlEncode()d
+            return "", err
+        }
+    }
+
+    return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+    // This should eventually support various digest types.
+    actualDigest, err := Digest(manifest)
+    if err != nil {
+        return false, err
+    }
+    return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+    key, err := libtrust.GenerateECP256PrivateKey()
+    if err != nil {
+        return nil, err // Coverage: This can fail only if rand.Reader fails.
+ } + + js, err := libtrust.NewJSONSignature(manifest) + if err != nil { + return nil, err + } + if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. + return nil, err + } + return js.PrettySignature("signatures") +} + +// MIMETypeIsMultiImage returns true if mimeType is a list of images +func MIMETypeIsMultiImage(mimeType string) bool { + return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex +} + +// MIMETypeSupportsEncryption returns true if the mimeType supports encryption +func MIMETypeSupportsEncryption(mimeType string) bool { + return mimeType == imgspecv1.MediaTypeImageManifest +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + nmt := NormalizedMIMEType(mt) + switch nmt { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + } + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt) +} diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go new file mode 100644 index 00000000000..5892184df19 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -0,0 +1,220 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "strings" + + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + ociencspec "github.com/containers/ocicrypt/spec" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. +func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + Annotations: desc.Annotations, + MediaType: desc.MediaType, + } +} + +// OCI1 is a manifest.Manifest implementation for OCI images. +// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// SupportedOCI1MediaType checks if the specified string is a supported OCI1 +// media type. +// +// Deprecated: blindly rejecting unknown MIME types when the consumer does not +// need to process the input just reduces interoperability (and violates the +// standard) with no benefit, and that this function does not check that the +// media type is appropriate for any specific purpose, so it’s not all that +// useful for validation anyway. +func SupportedOCI1MediaType(m string) error { + switch m { + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: + return nil + default: + return fmt.Errorf("unsupported OCIv1 media type: %q", m) + } +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, + allowedFieldConfig|allowedFieldLayers); err != nil { + return nil, err + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. 
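Aside (not part of the patch): a hypothetical sketch of the format-independent Manifest workflow from manifest.go above: guess the MIME type, parse with FromBlob, and digest the blob; the input path is a placeholder.

package main

import (
    "fmt"
    "os"

    "github.com/containers/image/v5/manifest"
)

func main() {
    blob, err := os.ReadFile("manifest.json") // hypothetical path to a single-image manifest
    if err != nil {
        panic(err)
    }
    mt := manifest.GuessMIMEType(blob)
    m, err := manifest.FromBlob(blob, mt) // dispatches to Schema1/Schema2/OCI1
    if err != nil {
        panic(err)
    }
    dig, err := manifest.Digest(blob) // strips v1s1 JWS signatures before digesting
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s digest=%s layers=%d\n", mt, dig, len(m.LayerInfos()))
}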
+func (m *OCI1) ConfigInfo() types.BlobInfo { + return BlobInfoFromOCI1Descriptor(m.Config) +} + +// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromOCI1Descriptor(layer), + EmptyLayer: false, + }) + } + return blobs +} + +var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ + { + mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, + compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, + compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, + }, + { + mtsUncompressed: imgspecv1.MediaTypeImageLayer, + compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip, + compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd, + }, +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers) +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and +// CompressionAlgorithm that isn't supported by OCI. +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + mimeType := original[i].MediaType + if info.CryptoOperation == types.Decrypt { + decMimeType, err := getDecryptedMediaType(mimeType) + if err != nil { + return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType) + } + mimeType = decMimeType + } + mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info) + if err != nil { + return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest) + } + if info.CryptoOperation == types.Encrypt { + encMediaType, err := getEncryptedMediaType(mimeType) + if err != nil { + return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType) + } + mimeType = encMediaType + } + + m.Layers[i].MediaType = mimeType + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
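Aside (not part of the patch): a hypothetical sketch of driving the encryption mapping above from outside the package, by setting CryptoOperation on the BlobInfos passed to UpdateLayerInfos; the descriptors are fabricated.

package main

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    m := manifest.OCI1FromComponents(imgspecv1.Descriptor{
        MediaType: imgspecv1.MediaTypeImageConfig,
        Size:      2,
        Digest:    digest.FromString("{}"),
    }, []imgspecv1.Descriptor{{
        MediaType: imgspecv1.MediaTypeImageLayerGzip,
        Size:      42, // fabricated
        Digest:    digest.FromString("fabricated layer"),
    }})

    // Ask UpdateLayerInfos to switch each layer to its encrypted counterpart.
    updated := make([]types.BlobInfo, 0, len(m.LayerInfos()))
    for _, li := range m.LayerInfos() {
        bi := li.BlobInfo
        bi.CryptoOperation = types.Encrypt
        updated = append(updated, bi)
    }
    if err := m.UpdateLayerInfos(updated); err != nil {
        panic(err)
    }
    fmt.Println(m.Layers[0].MediaType) // ...tar+gzip+encrypted
}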
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+    config, err := configGetter(m.ConfigInfo())
+    if err != nil {
+        return nil, err
+    }
+    v1 := &imgspecv1.Image{}
+    if err := json.Unmarshal(config, v1); err != nil {
+        return nil, err
+    }
+    d1 := &Schema2V1Image{}
+    if err := json.Unmarshal(config, d1); err != nil {
+        return nil, err
+    }
+    i := &types.ImageInspectInfo{
+        Tag: "",
+        Created: v1.Created,
+        DockerVersion: d1.DockerVersion,
+        Labels: v1.Config.Labels,
+        Architecture: v1.Architecture,
+        Os: v1.OS,
+        Layers: layerInfosToStrings(m.LayerInfos()),
+        Env: v1.Config.Env,
+    }
+    return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+    if err := m.Config.Digest.Validate(); err != nil {
+        return "", err
+    }
+    return m.Config.Digest.Hex(), nil
+}
+
+// getEncryptedMediaType returns the encrypted counterpart of the given mediatype, or
+// an error if the mediatype does not support encryption
+func getEncryptedMediaType(mediatype string) (string, error) {
+    for _, s := range strings.Split(mediatype, "+")[1:] {
+        if s == "encrypted" {
+            return "", errors.Errorf("unsupported mediatype: %v already encrypted", mediatype)
+        }
+    }
+    unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+    switch unsuffixedMediatype {
+    case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+        return mediatype + "+encrypted", nil
+    }
+
+    return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType returns the decrypted counterpart of the given mediatype, or
+// an error if the mediatype does not support decryption
+func getDecryptedMediaType(mediatype string) (string, error) {
+    if !strings.HasSuffix(mediatype, "+encrypted") {
+        return "", errors.Errorf("unsupported mediatype to decrypt: %v", mediatype)
+    }
+
+    return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
new file mode 100644
index 00000000000..c4f11e09c56
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -0,0 +1,233 @@
+package manifest
+
+import (
+    "encoding/json"
+    "fmt"
+    "runtime"
+
+    platform "github.com/containers/image/v5/internal/pkg/platform"
+    "github.com/containers/image/v5/types"
+    "github.com/opencontainers/go-digest"
+    imgspec "github.com/opencontainers/image-spec/specs-go"
+    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+    "github.com/pkg/errors"
+)
+
+// OCI1Index is just an alias for the OCI index type, but one which we can
+// provide methods for.
+type OCI1Index struct {
+    imgspecv1.Index
+}
+
+// MIMEType returns the MIME type of this particular manifest index.
+func (index *OCI1Index) MIMEType() string {
+    return imgspecv1.MediaTypeImageIndex
+}
+
+// Instances returns a slice of digests of the manifests that this index knows of.
+func (index *OCI1Index) Instances() []digest.Digest {
+    results := make([]digest.Digest, len(index.Manifests))
+    for i, m := range index.Manifests {
+        results[i] = m.Digest
+    }
+    return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the index.
+func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+    for _, manifest := range index.Manifests {
+        if manifest.Digest == instanceDigest {
+            return ListUpdate{
+                Digest: manifest.Digest,
+                Size: manifest.Size,
+                MediaType: manifest.MediaType,
+            }, nil
+        }
+    }
+    return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
+    if len(updates) != len(index.Manifests) {
+        return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+    }
+    for i := range updates {
+        if err := updates[i].Digest.Validate(); err != nil {
+            return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates))
+        }
+        index.Manifests[i].Digest = updates[i].Digest
+        if updates[i].Size < 0 {
+            return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+        }
+        index.Manifests[i].Size = updates[i].Size
+        if updates[i].MediaType == "" {
+            return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
+        }
+        index.Manifests[i].MediaType = updates[i].MediaType
+    }
+    return nil
+}
+
+// ChooseInstance returns the digest of the manifest in the index which is
+// appropriate for the current environment.
+func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+    wantedPlatforms, err := platform.WantedPlatforms(ctx)
+    if err != nil {
+        return "", errors.Wrapf(err, "getting platform information %#v", ctx)
+    }
+    for _, wantedPlatform := range wantedPlatforms {
+        for _, d := range index.Manifests {
+            if d.Platform == nil {
+                continue
+            }
+            imagePlatform := imgspecv1.Platform{
+                Architecture: d.Platform.Architecture,
+                OS: d.Platform.OS,
+                OSVersion: d.Platform.OSVersion,
+                OSFeatures: dupStringSlice(d.Platform.OSFeatures),
+                Variant: d.Platform.Variant,
+            }
+            if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
+                return d.Digest, nil
+            }
+        }
+    }
+
+    for _, d := range index.Manifests {
+        if d.Platform == nil {
+            return d.Digest, nil
+        }
+    }
+    return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
+
+// Serialize returns the index in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (index *OCI1Index) Serialize() ([]byte, error) {
+    buf, err := json.Marshal(index)
+    if err != nil {
+        return nil, errors.Wrapf(err, "marshaling OCI1Index %#v", index)
+    }
+    return buf, nil
+}
+
+// OCI1IndexFromComponents creates an OCI1 image index instance from the
+// supplied data.
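Aside (not part of the patch): a hypothetical no-op round-trip illustrating the Instance/UpdateInstances contract above, in which every ListUpdate field must be populated.

package main

import (
    "github.com/containers/image/v5/manifest"
)

// touchInstances re-applies each instance's current digest, size, and media
// type; with unchanged values this is a no-op, but it exercises the
// validation performed by UpdateInstances.
func touchInstances(index *manifest.OCI1Index) error {
    updates := make([]manifest.ListUpdate, 0, len(index.Instances()))
    for _, dig := range index.Instances() {
        u, err := index.Instance(dig) // Digest, Size, and MediaType are all populated
        if err != nil {
            return err
        }
        updates = append(updates, u)
    }
    return index.UpdateInstances(updates)
}

func main() {
    idx := manifest.OCI1IndexFromComponents(nil, nil) // empty index, trivially valid
    if err := touchInstances(idx); err != nil {
        panic(err)
    }
}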
+func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
+    index := OCI1Index{
+        imgspecv1.Index{
+            Versioned: imgspec.Versioned{SchemaVersion: 2},
+            MediaType: imgspecv1.MediaTypeImageIndex,
+            Manifests: make([]imgspecv1.Descriptor, len(components)),
+            Annotations: dupStringStringMap(annotations),
+        },
+    }
+    for i, component := range components {
+        var platform *imgspecv1.Platform
+        if component.Platform != nil {
+            platform = &imgspecv1.Platform{
+                Architecture: component.Platform.Architecture,
+                OS: component.Platform.OS,
+                OSVersion: component.Platform.OSVersion,
+                OSFeatures: dupStringSlice(component.Platform.OSFeatures),
+                Variant: component.Platform.Variant,
+            }
+        }
+        m := imgspecv1.Descriptor{
+            MediaType: component.MediaType,
+            Size: component.Size,
+            Digest: component.Digest,
+            URLs: dupStringSlice(component.URLs),
+            Annotations: dupStringStringMap(component.Annotations),
+            Platform: platform,
+        }
+        index.Manifests[i] = m
+    }
+    return &index
+}
+
+// OCI1IndexClone creates a deep copy of the passed-in index.
+func OCI1IndexClone(index *OCI1Index) *OCI1Index {
+    return OCI1IndexFromComponents(index.Manifests, index.Annotations)
+}
+
+// ToOCI1Index returns the index encoded as an OCI1 index.
+func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
+    return OCI1IndexClone(index), nil
+}
+
+// ToSchema2List returns the index encoded as a Schema2 list.
+func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
+    components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
+    for _, manifest := range index.Manifests {
+        platform := manifest.Platform
+        if platform == nil {
+            platform = &imgspecv1.Platform{
+                OS: runtime.GOOS,
+                Architecture: runtime.GOARCH,
+            }
+        }
+        converted := Schema2ManifestDescriptor{
+            Schema2Descriptor{
+                MediaType: manifest.MediaType,
+                Size: manifest.Size,
+                Digest: manifest.Digest,
+                URLs: dupStringSlice(manifest.URLs),
+            },
+            Schema2PlatformSpec{
+                OS: platform.OS,
+                Architecture: platform.Architecture,
+                OSFeatures: dupStringSlice(platform.OSFeatures),
+                OSVersion: platform.OSVersion,
+                Variant: platform.Variant,
+            },
+        }
+        components = append(components, converted)
+    }
+    s2 := Schema2ListFromComponents(components)
+    return s2, nil
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
+    index := OCI1Index{
+        Index: imgspecv1.Index{
+            Versioned: imgspec.Versioned{SchemaVersion: 2},
+            MediaType: imgspecv1.MediaTypeImageIndex,
+            Manifests: []imgspecv1.Descriptor{},
+            Annotations: make(map[string]string),
+        },
+    }
+    if err := json.Unmarshal(manifest, &index); err != nil {
+        return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest))
+    }
+    if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
+        allowedFieldManifests); err != nil {
+        return nil, err
+    }
+    return &index, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (index *OCI1Index) Clone() List {
+    return OCI1IndexClone(index)
+}
+
+// ConvertToMIMEType converts the passed-in image index to a manifest list of
+// the specified type.
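Aside (not part of the patch): a hypothetical sketch of converting an index to a schema2 list with ToSchema2List; the descriptor is fabricated, and its nil Platform picks up the runtime.GOOS/GOARCH defaults shown above.

package main

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    idx := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
        MediaType: imgspecv1.MediaTypeImageManifest,
        Size:      7, // fabricated
        Digest:    digest.FromString("payload"),
        // Platform deliberately left nil: ToSchema2List substitutes runtime.GOOS/GOARCH.
    }}, map[string]string{"org.example.note": "hypothetical annotation"})
    s2, err := idx.ToSchema2List()
    if err != nil {
        panic(err)
    }
    fmt.Println(s2.MediaType, s2.Manifests[0].Platform.OS, s2.Manifests[0].Platform.Architecture)
}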
+func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go new file mode 100644 index 00000000000..3d8738db536 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -0,0 +1,168 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type ociArchiveImageDestination struct { + ref ociArchiveReference + unpackedDest types.ImageDestination + tempDirRef tempDirOCIRef +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { + tempDirRef, err := createOCIRef(sys, ref.image) + if err != nil { + return nil, errors.Wrapf(err, "creating oci reference") + } + unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageDestination{ref: ref, + unpackedDest: unpackedDest, + tempDirRef: tempDirRef}, nil +} + +// Reference returns the reference used to set up this destination. +func (d *ociArchiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any +// Close deletes the temp directory of the oci-archive image +func (d *ociArchiveImageDestination) Close() error { + defer func() { + err := d.tempDirRef.deleteTempDir() + logrus.Debugf("Error deleting temporary directory: %v", err) + }() + return d.unpackedDest.Close() +} + +func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { + return d.unpackedDest.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures +func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { + return d.unpackedDest.SupportsSignatures(ctx) +} + +func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return d.unpackedDest.DesiredLayerCompression() +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. 
+func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool {
+	return d.unpackedDest.AcceptsForeignLayerURLs()
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
+	return d.unpackedDest.MustMatchRuntimeOS()
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return d.unpackedDest.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes the manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + return d.unpackedDest.PutManifest(ctx, m, instanceDigest) +} + +// PutSignatures writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest) +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// after the directory is made, it is tarred up into a file and the directory is deleted +func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil { + return errors.Wrapf(err, "storing image %q", d.ref.image) + } + + // path of directory to tar up + src := d.tempDirRef.tempDirectory + // path to save tarred up file + dst := d.ref.resolvedFile + return tarDirectory(src, dst) +} + +// tar converts the directory at src and saves it to dst +func tarDirectory(src, dst string) error { + // input is a stream of bytes from the archive of the directory at path + input, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + return errors.Wrapf(err, "retrieving stream of bytes from %q", src) + } + + // creates the tar file + outFile, err := os.Create(dst) + if err != nil { + return errors.Wrapf(err, "creating tar file %q", dst) + } + defer outFile.Close() + + // copies the contents of the directory to the tar file + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. + _, err = io.Copy(outFile, input) + + return err +} diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go new file mode 100644 index 00000000000..20b392dc0e1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -0,0 +1,122 @@ +package archive + +import ( + "context" + "io" + + ocilayout "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type ociArchiveImageSource struct { + ref ociArchiveReference + unpackedSrc types.ImageSource + tempDirRef tempDirOCIRef +} + +// newImageSource returns an ImageSource for reading from an existing directory. 
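As a usage sketch, the archive destination above is normally reached through the generic copy machinery rather than driven directly. Assuming the vendored copy, signature, and alltransports packages, something like the following would produce such a tarball; the image names and the output path are placeholders:

package main

import (
	"context"

	imgcopy "github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	// The oci-archive transport unpacks into a temp dir and tars it up on Commit.
	dstRef, err := alltransports.ParseImageName("oci-archive:/tmp/busybox.tar:latest")
	if err != nil {
		panic(err)
	}

	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyCtx.Destroy()

	if _, err := imgcopy.Image(context.Background(), policyCtx, dstRef, srcRef, &imgcopy.Options{}); err != nil {
		panic(err)
	}
}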
+// newImageSource untars the file and saves it in a temp directory +func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { + tempDirRef, err := createUntarTempDir(sys, ref) + if err != nil { + return nil, errors.Wrap(err, "creating temp directory") + } + + unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageSource{ref: ref, + unpackedSrc: unpackedSrc, + tempDirRef: tempDirRef}, nil +} + +// LoadManifestDescriptor loads the manifest +// Deprecated: use LoadManifestDescriptorWithContext instead +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + return LoadManifestDescriptorWithContext(nil, imgRef) +} + +// LoadManifestDescriptorWithContext loads the manifest +func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociArchRef, ok := imgRef.(ociArchiveReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") + } + tempDirRef, err := createUntarTempDir(sys, ociArchRef) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "creating temp directory") + } + defer func() { + err := tempDirRef.deleteTempDir() + logrus.Debugf("Error deleting temporary directory: %v", err) + }() + + descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "loading index") + } + return descriptor, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociArchiveImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +// Close deletes the temporary directory at dst +func (s *ociArchiveImageSource) Close() error { + defer func() { + err := s.tempDirRef.deleteTempDir() + logrus.Debugf("error deleting tmp dir: %v", err) + }() + return s.unpackedSrc.Close() +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
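A small sketch of the exported helper defined above, assuming a tarball like the one produced in the previous sketch exists at the placeholder path:

package main

import (
	"fmt"

	ociarchive "github.com/containers/image/v5/oci/archive"
)

func main() {
	// The reference string carries no "oci-archive:" prefix here; ParseReference
	// expects the transport name to have been stripped already.
	ref, err := ociarchive.ParseReference("/tmp/busybox.tar:latest")
	if err != nil {
		panic(err)
	}
	// Untars into a temporary directory, loads index.json, and cleans up after itself.
	desc, err := ociarchive.LoadManifestDescriptorWithContext(nil, ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.MediaType, desc.Digest, desc.Size)
}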
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + return s.unpackedSrc.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.unpackedSrc.GetSignatures(ctx, instanceDigest) +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go new file mode 100644 index 00000000000..54d325d34df --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -0,0 +1,198 @@ +package archive + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/oci/internal" + ocilayout "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for OCI archive +// it creates an oci-archive tar file by calling into the OCI transport +// tarring the directory created by oci and deleting the directory +var Transport = ociArchiveTransport{} + +type ociArchiveTransport struct{} + +// ociArchiveReference is an ImageReference for OCI Archive paths +type ociArchiveReference struct { + file string + resolvedFile string + image string +} + +func (t ociArchiveTransport) Name() string { + return "oci-archive" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix +// into an ImageReference. 
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { + return internal.ValidateScope(scope) +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +func ParseReference(reference string) (types.ImageReference, error) { + file, image := internal.SplitPathAndImage(reference) + return NewReference(file, image) +} + +// NewReference returns an OCI reference for a file and a image. +func NewReference(file, image string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(file); err != nil { + return nil, err + } + + if err := internal.ValidateImageName(image); err != nil { + return nil, err + } + + return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil +} + +func (ref ociArchiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +func (ref ociArchiveReference) StringWithinTransport() string { + return fmt.Sprintf("%s:%s", ref.file, ref.image) +} + +// DockerReference returns a Docker reference associated with this reference +func (ref ociArchiveReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +func (ref ociArchiveReference) PolicyConfigurationIdentity() string { + // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the + // same image and the two can’t be statically disambiguated. Using at least the repository directory is + // less granular but hopefully still useful. + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set +func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by ociTransport.ValidatePolicyConfigurationScope above. + if lastSlash == -1 || path == "/" { + break + } + res = append(res, path) + path = path[:lastSlash] + } + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
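To make the namespace walk above concrete, a hedged sketch: for a reference resolving to /var/lib/images/app.tar, PolicyConfigurationNamespaces peels one path component per iteration. The paths are illustrative and assume no symlinks:

package main

import (
	"fmt"

	ociarchive "github.com/containers/image/v5/oci/archive"
)

func main() {
	ref, err := ociarchive.ParseReference("/var/lib/images/app.tar:prod")
	if err != nil {
		panic(err)
	}
	// The identity is the resolved file; the image part is deliberately excluded.
	fmt.Println(ref.PolicyConfigurationIdentity())
	// Expected namespaces, most specific first (assuming no symlinks):
	//   /var/lib/images/app.tar
	//   /var/lib/images
	//   /var/lib
	//   /var
	for _, ns := range ref.PolicyConfigurationNamespaces() {
		fmt.Println(ns)
	}
}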
+func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for oci: images") +} + +// struct to store the ociReference and temporary directory returned by createOCIRef +type tempDirOCIRef struct { + tempDirectory string + ociRefExtracted types.ImageReference +} + +// deletes the temporary directory created +func (t *tempDirOCIRef) deleteTempDir() error { + return os.RemoveAll(t.tempDirectory) +} + +// createOCIRef creates the oci reference of the image +// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files +func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { + dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + if err != nil { + return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory") + } + ociRef, err := ocilayout.NewReference(dir, image) + if err != nil { + return tempDirOCIRef{}, err + } + + tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef} + return tempDirRef, nil +} + +// creates the temporary directory and copies the tarred content to it +func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) { + tempDirRef, err := createOCIRef(sys, ref.image) + if err != nil { + return tempDirOCIRef{}, errors.Wrap(err, "creating oci reference") + } + src := ref.resolvedFile + dst := tempDirRef.tempDirectory + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
+	arch, err := os.Open(src)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+	defer arch.Close()
+	if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return tempDirOCIRef{}, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+		}
+		return tempDirOCIRef{}, errors.Wrapf(err, "untarring file %q", tempDirRef.tempDirectory)
+	}
+	return tempDirRef, nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
new file mode 100644
index 00000000000..c2012e50e02
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,126 @@
+package internal
+
+import (
+	"github.com/pkg/errors"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = errors.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+	if runtime.GOOS == "windows" {
+		return splitPathAndImageWindows(reference)
+	}
+	return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+	groups := windowsRefRegexp.FindStringSubmatch(reference)
+	// nil group means no match
+	if groups == nil {
+		return reference, ""
+	}
+
+	// we expect three elements: the full match, then the capture groups for the
+	// path and the image
+	if len(groups) != 3 {
+		return reference, ""
+	}
+	return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+	sep := strings.SplitN(reference, ":", 2)
+	path := sep[0]
+
+	var image string
+	if len(sep) == 2 {
+		image = sep[1]
+	}
+	return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+	if runtime.GOOS == "windows" {
+		// On Windows we must allow for a ':' as part of the path
+		if strings.Count(path, ":") > 1 {
+			return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+		}
+	} else {
+		if strings.Contains(path, ":") {
+			return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+		}
+	}
+	return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
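Because this package is internal, the helpers above are only reachable from within containers/image itself; a test-style sketch of the behaviour encoded above (non-Windows expectations, hypothetical paths):

package internal

import "testing"

func TestSplitAndValidateSketch(t *testing.T) {
	// On non-Windows platforms the reference splits at the first colon.
	path, image := SplitPathAndImage("/tmp/bundle:v1")
	if path != "/tmp/bundle" || image != "v1" {
		t.Fatalf("unexpected split: %q %q", path, image)
	}

	// A missing image part yields ""; ValidateImageName treats "" as valid.
	if _, image := SplitPathAndImage("/tmp/bundle"); image != "" {
		t.Fatalf("expected empty image, got %q", image)
	}
	if err := ValidateImageName(""); err != nil {
		t.Fatal(err)
	}

	// Paths containing a colon are rejected on non-Windows platforms.
	if err := ValidateOCIPath("/tmp/bad:path"); err == nil {
		t.Fatal("expected an error for a path containing a colon")
	}
}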
+func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go new file mode 100644 index 00000000000..d0ee7263527 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -0,0 +1,347 @@ +package layout + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ociImageDestination struct { + ref ociReference + index imgspecv1.Index + sharedBlobDir string + acceptUncompressedLayers bool +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. +func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + d := &ociImageDestination{ref: ref, index: *index} + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
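A hedged sketch of driving this layout destination directly through the generic types.ImageDestination interface (copy.Image normally does this); the directory is a placeholder, and none.NoCache suffices because the PutBlob implementation that follows never consults the cache:

package main

import (
	"bytes"
	"context"
	"fmt"

	ocilayout "github.com/containers/image/v5/oci/layout"
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
)

func main() {
	ref, err := ocilayout.NewReference("/tmp/layout", "demo")
	if err != nil {
		panic(err)
	}
	dest, err := ref.NewImageDestination(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	defer dest.Close()

	blob := []byte(`{"example": true}`)
	// Size -1 and an empty digest mean "unknown"; PutBlob digests the stream itself.
	info, err := dest.PutBlob(context.Background(), bytes.NewReader(blob),
		types.BlobInfo{Size: -1}, none.NoCache, true)
	if err != nil {
		panic(err)
	}
	fmt.Println("stored blob:", info.Digest, info.Size)
	// A real client would go on to call PutManifest and Commit.
}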
+func (d *ociImageDestination) Close() error { + return nil +} + +func (d *ociImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { + return errors.Errorf("Pushing signatures for OCI images is not supported") +} + +func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { + if d.acceptUncompressedLayers { + return types.PreserveOriginal + } + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *ociImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") + if err != nil { + return types.BlobInfo{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + blobFile.Close() + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
+	size, err := io.Copy(blobFile, stream)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	blobDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+	// On Windows, the “permissions of newly created files” argument to syscall.Open is
+	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+	// always fails on Windows.
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + var digest digest.Digest + var err error + if instanceDigest != nil { + digest = *instanceDigest + } else { + digest, err = manifest.Digest(m) + if err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { + return err + } + + if instanceDigest != nil { + return nil + } + + // If we had platform information, we'd build an imgspecv1.Platform structure here. + + // Start filling out the descriptor for this entry + desc := imgspecv1.Descriptor{} + desc.Digest = digest + desc.Size = int64(len(m)) + if d.ref.image != "" { + desc.Annotations = make(map[string]string) + desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image + } + + // If we knew the MIME type, we wouldn't have to guess here. + desc.MediaType = manifest.GuessMIMEType(m) + + d.addManifest(&desc) + + return nil +} + +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + // If the new entry has a name, remove any conflicting names which we already have. + if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" { + // The name is being set on a new entry, so remove any older ones that had the same name. + // We might be storing an index and all of its component images, and we'll want to attach + // the name to the last one, which is the index. + for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. + for i, manifest := range d.index.Manifests { + if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" { + // Replace it completely. + d.index.Manifests[i] = *desc + return + } + } + // It's a new entry to be added to the index. + d.index.Manifests = append(d.index.Manifests, *desc) +} + +// PutSignatures would add the given signatures to the oci layout (currently not supported). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if len(signatures) != 0 { + return errors.Errorf("Pushing signatures for OCI images is not supported") + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. 
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
+	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+		return err
+	}
+	indexJSON, err := json.Marshal(d.index)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
+
+func ensureDirectoryExists(path string) error {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated, since in case of unexpected errors true is returned
+func indexExists(ref ociReference) bool {
+	_, err := os.Stat(ref.indexPath())
+	if err == nil {
+		return true
+	}
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
new file mode 100644
index 00000000000..9d8ab689ba4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -0,0 +1,209 @@
+package layout
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/tlsclientconfig"
+	"github.com/containers/image/v5/types"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociImageSource struct {
+	ref           ociReference
+	index         *imgspecv1.Index
+	descriptor    imgspecv1.Descriptor
+	client        *http.Client
+	sharedBlobDir string
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+	if sys != nil && sys.OCICertPath != "" {
+		if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
+	}
+
+	client := &http.Client{}
+	client.Transport = tr
+	descriptor, err := ref.getManifestDescriptor()
+	if err != nil {
+		return nil, err
+	}
+	index, err := ref.getIndex()
+	if err != nil {
+		return nil, err
+	}
+	d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
+	if sys != nil {
+		// TODO(jonboulle): check dir existence?
+ d.sharedBlobDir = sys.OCISharedBlobDirPath + } + return d, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ociImageSource) Close() error { + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + var err error + + if instanceDigest == nil { + dig = digest.Digest(s.descriptor.Digest) + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + + m, err := ioutil.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } + + return m, mimeType, nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := s.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return [][]byte{}, nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). 
+func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + + errWrap := errors.New("failed fetching external blob from all urls") + hasSupportedURL := false + for _, u := range urls { + if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + continue // unsupported url. skip this url. + } + hasSupportedURL = true + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error()) + continue + } + + resp, err := s.client.Do(req) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error()) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", u) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + if !hasSupportedURL { + return nil, 0, nil // fallback to non-external blob + } + + return nil, 0, errWrap +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go new file mode 100644 index 00000000000..a99b631584d --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -0,0 +1,264 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. 
+ ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") +) + +type ociTransport struct{} + +func (t ociTransport) Name() string { + return "oci" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { + return internal.ValidateScope(scope) +} + +// ociReference is an ImageReference for OCI directory paths. +type ociReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + dir string // As specified by the user. May be relative, contain symlinks, etc. + resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. + // If image=="", it means the "only image" in the index.json is used in the case it is a source + // for destinations, the image name annotation "image.ref.name" is not added to the index.json + image string +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +func ParseReference(reference string) (types.ImageReference, error) { + dir, image := internal.SplitPathAndImage(reference) + return NewReference(dir, image) +} + +// NewReference returns an OCI reference for a directory and a image. +// +// We do not expose an API supplying the resolvedDir; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedDir. +func NewReference(dir, image string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(dir); err != nil { + return nil, err + } + + if err = internal.ValidateImageName(image); err != nil { + return nil, err + } + + return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil +} + +func (ref ociReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ociReference) StringWithinTransport() string { + return fmt.Sprintf("%s:%s", ref.dir, ref.image) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref ociReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref ociReference) PolicyConfigurationIdentity() string { + // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the + // same image and the two can’t be statically disambiguated. Using at least the repository directory is + // less granular but hopefully still useful. + return ref.resolvedDir +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref ociReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedDir + for { + lastSlash := strings.LastIndex(path, "/") + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by ociTransport.ValidatePolicyConfigurationScope above. + if lastSlash == -1 || path == "/" { + break + } + res = append(res, path) + path = path[:lastSlash] + } + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together +// with an error. 
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + indexJSON, err := os.Open(ref.indexPath()) + if err != nil { + return nil, err + } + defer indexJSON.Close() + + index := &imgspecv1.Index{} + if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + return nil, err + } + return index, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, err + } + + var d *imgspecv1.Descriptor + if ref.image == "" { + // return manifest if only one image is in the oci directory + if len(index.Manifests) == 1 { + d = &index.Manifests[0] + } else { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, ErrMoreThanOneImage + } + } else { + // if image specified, look through all manifests for a match + for _, md := range index.Manifests { + if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { + continue + } + refName, ok := md.Annotations[imgspecv1.AnnotationRefName] + if !ok { + continue + } + if refName == ref.image { + d = &md + break + } + } + } + if d == nil { + return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image) + } + return *d, nil +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef") + } + return ociRef.getManifestDescriptor() +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for oci: images") +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. +func (ref ociReference) ociLayoutPath() string { + return filepath.Join(ref.dir, "oci-layout") +} + +// indexPath returns a path for the index.json within a directory using OCI conventions. +func (ref ociReference) indexPath() string { + return filepath.Join(ref.dir, "index.json") +} + +// blobPath returns a path for a blob within a directory using OCI image-layout conventions. 
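+// For example (hypothetical digest, with ref.dir == "/tmp/layout"):
+//
+//	blobPath("sha256:aaaa...", "")              -> "/tmp/layout/blobs/sha256/aaaa..."
+//	blobPath("sha256:aaaa...", "/shared/blobs") -> "/shared/blobs/sha256/aaaa..."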
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { + if err := digest.Validate(); err != nil { + return "", errors.Wrapf(err, "unexpected digest reference %s", digest) + } + blobDir := filepath.Join(ref.dir, "blobs") + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go new file mode 100644 index 00000000000..4ffbced6bd6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -0,0 +1,1194 @@ +package openshift + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/containers/storage/pkg/homedir" + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/http2" +) + +// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig. +// restTLSClientConfig contains settings to enable transport layer security +type restTLSClientConfig struct { + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config. +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type restConfig struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig restTLSClientConfig + + // Server should be accessed without verifying the TLS + // certificate. For testing only. + Insecure bool +} + +// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. +// ClientConfig is used to make it easy to get an api server client +type clientConfig interface { + // ClientConfig returns a complete client config + ClientConfig() (*restConfig, error) +} + +// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. 
+func defaultClientConfig() clientConfig {
+	loadingRules := newOpenShiftClientConfigLoadingRules()
+	// REMOVED: Allowing command-line overriding of loadingRules
+	// REMOVED: clientcmd.ConfigOverrides
+
+	clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
+
+	return clientConfig
+}
+
+var recommendedHomeFile = path.Join(homedir.Get(), ".kube/config")
+
+// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
+// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
+// 1. --config value
+// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
+func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
+	chain := []string{}
+
+	envVarFile := os.Getenv("KUBECONFIG")
+	if len(envVarFile) != 0 {
+		chain = append(chain, filepath.SplitList(envVarFile)...)
+	} else {
+		chain = append(chain, recommendedHomeFile)
+	}
+
+	return &clientConfigLoadingRules{
+		Precedence: chain,
+		// REMOVED: Migration support; run (oc login) to trigger migration
+	}
+}
+
+// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules.
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid
+// passing extraneous information down a call stack.
+type deferredLoadingClientConfig struct {
+	loadingRules *clientConfigLoadingRules
+
+	clientConfig clientConfig
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig.
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+	return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+	if config.clientConfig == nil {
+		// REMOVED: Support for concurrent use in multiple threads.
+		mergedConfig, err := config.loadingRules.Load()
+		if err != nil {
+			return nil, err
+		}
+
+		// REMOVED: Interactive fallback support.
+		mergedClientConfig := newNonInteractiveClientConfig(*mergedConfig)
+
+		config.clientConfig = mergedClientConfig
+	}
+
+	return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	// REMOVED: In-cluster service account configuration use.
+
+	return mergedConfig, nil
+}
+
+var (
+	// DefaultCluster is the cluster config used when no other config is specified
+	// TODO: eventually apiserver should start on 443 and be secure by default
+	defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+	// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+	envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+	config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+	return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	configAuthInfo := config.getAuthInfo()
+	configClusterInfo := config.getCluster()
+
+	clientConfig := &restConfig{}
+	clientConfig.Host = configClusterInfo.Server
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+
+	// only try to read the auth information if we are secure
+	if isConfigTransportTLS(*clientConfig) {
+		var err error
+		// REMOVED: Support for interactive fallback.
+		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
+		if err != nil {
+			return nil, err
+		}
+		if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
+			return nil, err
+		}
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		if err = mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig); err != nil {
+			return nil, err
+		}
+	}
+
+	return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// configClusterInfo holds the information identifying the server provided by .kubeconfig
+	configClientConfig := &restConfig{}
+	configClientConfig.TLSClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.TLSClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil {
+		return nil, err
+	}
+
+	return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.TLSClientConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.TLSClientConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.TLSClientConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.TLSClientConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+
+	// REMOVED: prompting for missing information.
+	return mergedConfig, nil
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+	var validationErrors []error
+	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+	validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+ // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster { + return newErrConfigurationInvalid([]error{errEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName. +func (config *directClientConfig) getContextName() string { + // REMOVED: overrides support + return config.config.CurrentContext +} + +// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName. +func (config *directClientConfig) getAuthInfoName() string { + // REMOVED: overrides support + return config.getContext().AuthInfo +} + +// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName. +func (config *directClientConfig) getClusterName() string { + // REMOVED: overrides support + return config.getContext().Cluster +} + +// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext. +func (config *directClientConfig) getContext() clientcmdContext { + contexts := config.config.Contexts + contextName := config.getContextName() + + var mergedContext clientcmdContext + if configContext, exists := contexts[contextName]; exists { + if err := mergo.MergeWithOverwrite(&mergedContext, configContext); err != nil { + logrus.Debugf("Can't merge configContext: %v", err) + } + } + // REMOVED: overrides support + + return mergedContext +} + +var ( + errEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + errEmptyCluster = errors.New("cluster has no server defined") +) + +//helper for checking certificate/key/CA +func validateFileIsReadable(name string) error { + answer, err := os.Open(name) + defer func() { + if err := answer.Close(); err != nil { + logrus.Debugf("Error closing %v: %v", name, err) + } + }() + return err +} + +// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { + var validationErrors []error + + if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { + return []error{errEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + err := validateFileIsReadable(clusterInfo.CertificateAuthority) + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { + var validationErrors []error + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + err := validateFileIsReadable(authInfo.ClientCertificate) + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + err := validateFileIsReadable(authInfo.ClientKey) + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. 
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
+	authInfos := config.config.AuthInfos
+	authInfoName := config.getAuthInfoName()
+
+	var mergedAuthInfo clientcmdAuthInfo
+	if configAuthInfo, exists := authInfos[authInfoName]; exists {
+		if err := mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo); err != nil {
+			logrus.Debugf("Can't merge configAuthInfo: %v", err)
+		}
+	}
+	// REMOVED: overrides support
+
+	return mergedAuthInfo
+}
+
+// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
+func (config *directClientConfig) getCluster() clientcmdCluster {
+	clusterInfos := config.config.Clusters
+	clusterInfoName := config.getClusterName()
+
+	var mergedClusterInfo clientcmdCluster
+	if err := mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster); err != nil {
+		logrus.Debugf("Can't merge defaultCluster: %v", err)
+	}
+	if err := mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster); err != nil {
+		logrus.Debugf("Can't merge envVarCluster: %v", err)
+	}
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		if err := mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo); err != nil {
+			logrus.Debugf("Can't merge configClusterInfo: %v", err)
+		}
+	}
+	// REMOVED: overrides support
+
+	return mergedClusterInfo
+}
+
+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid
+// a nil pointer panic when Error() is called.
+func newAggregate(errlist []error) error {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// In case the input error list contains nil
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
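+// As a sketch of the resulting messages (e1, e2 are hypothetical errors):
+//
+//	newErrConfigurationInvalid(nil)             -> nil
+//	newErrConfigurationInvalid([]error{e1})     -> "invalid configuration: e1"
+//	newErrConfigurationInvalid([]error{e1, e2}) -> "invalid configuration: [e1, e2]"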
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config.
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath.
+// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if this file is not present.
+type clientConfigLoadingRules struct {
+	Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+// If ExplicitPath is set, return the unmerged explicit file.
+// Otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+	errlist := []error{}
+
+	kubeConfigFiles := []string{}
+
+	// REMOVED: explicit path support
+	kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+
+	kubeconfigs := []*clientcmdConfig{}
+	// read and cache the config files so that we only look at them once
+	for _, filename := range kubeConfigFiles {
+		if len(filename) == 0 {
+			// no work to do
+			continue
+		}
+
+		config, err := loadFromFile(filename)
+		if os.IsNotExist(err) {
+			// skip missing files
+			continue
+		}
+		if err != nil {
+			errlist = append(errlist, errors.Wrapf(err, "loading config file \"%s\"", filename))
+			continue
+		}
+
+		kubeconfigs = append(kubeconfigs, config)
+	}
+
+	// first merge all of our maps
+	mapConfig := clientcmdNewConfig()
+	for _, kubeconfig := range kubeconfigs {
+		if err := mergo.MergeWithOverwrite(mapConfig, kubeconfig); err != nil {
+			return nil, err
+		}
+	}
+
+	// merge all of the struct values in the reverse order so that priority is given correctly
+	// errors are not added to the list the second time
+	nonMapConfig := clientcmdNewConfig()
+	for i := len(kubeconfigs) - 1; i >= 0; i-- {
+		kubeconfig := kubeconfigs[i]
+		if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil {
+			return nil, err
+		}
+	}
+
+	// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+	// get the values we expect.
+	config := clientcmdNewConfig()
+	if err := mergo.MergeWithOverwrite(config, mapConfig); err != nil {
+		return nil, err
+	}
+	if err := mergo.MergeWithOverwrite(config, nonMapConfig); err != nil {
+		return nil, err
+	}
+
+	// REMOVED: Possibility to skip this.
+	if err := resolveLocalPaths(config); err != nil {
+		errlist = append(errlist, err)
+	}
+
+	return config, newAggregate(errlist)
+}
+
+// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func loadFromFile(filename string) (*clientcmdConfig, error) {
+	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := load(kubeconfigBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// set LocationOfOrigin on every Cluster, User, and Context
+	for key, obj := range config.AuthInfos {
+		obj.LocationOfOrigin = filename
+		config.AuthInfos[key] = obj
+	}
+	for key, obj := range config.Clusters {
+		obj.LocationOfOrigin = filename
+		config.Clusters[key] = obj
+	}
+	for key, obj := range config.Contexts {
+		obj.LocationOfOrigin = filename
+		config.Contexts[key] = obj
+	}
+
+	if config.AuthInfos == nil {
+		config.AuthInfos = map[string]*clientcmdAuthInfo{}
+	}
+	if config.Clusters == nil {
+		config.Clusters = map[string]*clientcmdCluster{}
+	}
+	if config.Contexts == nil {
+		config.Contexts = map[string]*clientcmdContext{}
+	}
+
+	return config, nil
+}
+
+// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func load(data []byte) (*clientcmdConfig, error) {
+	config := clientcmdNewConfig()
+	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+	if len(data) == 0 {
+		return config, nil
+	}
+	// Note: This does absolutely no kind/version checking or conversions.
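+	// For illustration, a minimal kubeconfig accepted here might look like this
+	// (hypothetical values; the YAML is converted to JSON below and unmarshalled
+	// into clientcmdConfig via the custom map types):
+	//
+	//	clusters:
+	//	- name: default
+	//	  cluster:
+	//	    server: https://localhost:8443
+	//	users:
+	//	- name: admin
+	//	  user:
+	//	    token: sometoken
+	//	contexts:
+	//	- name: default
+	//	  context: {cluster: default, user: admin}
+	//	current-context: default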
+ data, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, config); err != nil { + return nil, err + } + return config, nil +} + +// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths. +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func resolveLocalPaths(config *clientcmdConfig) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin) + } + + if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin) + } + + if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences. +func getClusterFileReferences(cluster *clientcmdCluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences. +func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string { + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} +} + +// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths. +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func resolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor. +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. 
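+// A minimal sketch of a call (hypothetical host; newOpenshiftClient in openshift.go is the real caller):
+//
+//	baseURL, httpClient, err := restClientFor(&restConfig{Host: "https://master:8443"})
+//	// baseURL.String() == "https://master:8443"; a nil httpClient means plain
+//	// http.DefaultTransport semantics suffice.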
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { + // REMOVED: Configurable GroupVersion, Codec + // REMOVED: Configurable versionedAPIPath + baseURL, err := defaultServerURLFor(config) + if err != nil { + return nil, nil, err + } + + transport, err := transportFor(config) + if err != nil { + return nil, nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + } + + // REMOVED: Configurable QPS, Burst, ContentConfig + // REMOVED: Actually returning a RESTClient object. + return baseURL, httpClient, nil +} + +// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL. +// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. +func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { + if host == "" { + return nil, errors.Errorf("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil { + return nil, err + } + if hostURL.Scheme == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // REMOVED: versionedAPIPath computation. + return hostURL, nil +} + +// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor. +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerURLFor(config *restConfig) (*url.URL, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." + hasCA := len(config.TLSClientConfig.CAFile) != 0 || len(config.TLSClientConfig.CAData) != 0 + hasCert := len(config.TLSClientConfig.CertFile) != 0 || len(config.TLSClientConfig.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + // REMOVED: Configurable APIPath, GroupVersion + return defaultServerURL(host, defaultTLS) +} + +// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor. +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func transportFor(config *restConfig) (http.RoundTripper, error) { + // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support + return transportNew(config) +} + +// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS. +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. 
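+// Sketch of the resulting behavior (hypothetical configs, following defaultServerURLFor above):
+//
+//	isConfigTransportTLS(restConfig{Host: "http://localhost:8080"}) -> false
+//	isConfigTransportTLS(restConfig{Host: "https://master:8443"})   -> true
+//	isConfigTransportTLS(restConfig{Insecure: true})                -> true  // Host defaults to "localhost", scheme to "https"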
+func isConfigTransportTLS(config restConfig) bool { + baseURL, err := defaultServerURLFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func transportNew(config *restConfig) (http.RoundTripper, error) { + // REMOVED: custom config.Transport support. + // Set transport level security + + var ( + rt http.RoundTripper + err error + ) + + rt, err = tlsCacheGet(config) + if err != nil { + return nil, err + } + + // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. + if len(config.Username) != 0 && len(config.BearerToken) != 0 { + return nil, errors.Errorf("username/password or bearer token may be set, but not both") + } + + return rt, nil +} + +// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. +func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { + // REMOVED: any actual caching + + // Get the TLS options for this client config + tlsConfig, err := tlsConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. + t := &http.Transport{ + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + // Allow clients to disable http2 if needed. 
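+	// (Sketch: with DISABLE_HTTP2 unset, the transport below is upgraded to speak
+	// HTTP/2 where the server supports it; any non-empty value keeps it HTTP/1.1-only.)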
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { + _ = http2.ConfigureTransport(t) + } + return t, nil +} + +// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func tlsConfigFor(c *restConfig) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { + return nil, nil + } + if c.HasCA() && c.Insecure { + return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) + MinVersion: tls.VersionTLS10, + InsecureSkipVerify: c.Insecure, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLSClientConfig.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLSClientConfig.CertData, c.TLSClientConfig.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func loadTLSFiles(c *restConfig) error { + var err error + c.TLSClientConfig.CAData, err = dataFromSliceOrFile(c.TLSClientConfig.CAData, c.TLSClientConfig.CAFile) + if err != nil { + return err + } + + c.TLSClientConfig.CertData, err = dataFromSliceOrFile(c.TLSClientConfig.CertData, c.TLSClientConfig.CertFile) + if err != nil { + return err + } + + c.TLSClientConfig.KeyData, err = dataFromSliceOrFile(c.TLSClientConfig.KeyData, c.TLSClientConfig.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} + +// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. 
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *restConfig) HasCA() bool {
+	return len(c.TLSClientConfig.CAData) > 0 || len(c.TLSClientConfig.CAFile) > 0
+}
+
+// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *restConfig) HasCertAuth() bool {
+	return len(c.TLSClientConfig.CertData) != 0 || len(c.TLSClientConfig.CertFile) != 0
+}
+
+// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type clientcmdConfig struct {
+	// Clusters is a map of referenceable names to cluster configs
+	Clusters clustersMap `json:"clusters"`
+	// AuthInfos is a map of referenceable names to user configs
+	AuthInfos authInfosMap `json:"users"`
+	// Contexts is a map of referenceable names to context configs
+	Contexts contextsMap `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+}
+
+type clustersMap map[string]*clientcmdCluster
+
+func (m *clustersMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedCluster
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		cluster := e.Cluster // Allocates a new instance in each iteration
+		(*m)[e.Name] = &cluster
+	}
+	return nil
+}
+
+type authInfosMap map[string]*clientcmdAuthInfo
+
+func (m *authInfosMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedAuthInfo
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		authInfo := e.AuthInfo // Allocates a new instance in each iteration
+		(*m)[e.Name] = &authInfo
+	}
+	return nil
+}
+
+type contextsMap map[string]*clientcmdContext
+
+func (m *contextsMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedContext
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		context := e.Context // Allocates a new instance in each iteration
+		(*m)[e.Name] = &context
+	}
+	return nil
+}
+
+// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func clientcmdNewConfig() *clientcmdConfig {
+	return &clientcmdConfig{
+		Clusters:  make(map[string]*clientcmdCluster),
+		AuthInfos: make(map[string]*clientcmdAuthInfo),
+		Contexts:  make(map[string]*clientcmdContext),
+	}
+}
+
+// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
+// Cluster contains information about how to communicate with a kubernetes cluster
+type clientcmdCluster struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	Token string `json:"token,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	Password string `json:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster clientcmdCluster `json:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context clientcmdContext `json:"context"`
+}
+
+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
+// NamedAuthInfo relates nicknames to auth information +type v1NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo clientcmdAuthInfo `json:"user"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go new file mode 100644 index 00000000000..c7c6cf6945a --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -0,0 +1,584 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// openshiftClient is configuration for dealing with a single image stream, for reading or writing. +type openshiftClient struct { + ref openshiftReference + baseURL *url.URL + // Values from Kubernetes configuration + httpClient *http.Client + bearerToken string // "" if not used + username string // "" if not used + password string // if username != "" +} + +// newOpenshiftClient creates a new openshiftClient for the specified reference. +func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { + // We have already done this parsing in ParseReference, but thrown away + // httpClient. So, parse again. + // (We could also rework/split restClientFor to "get base URL" to be done + // in ParseReference, and "get httpClient" to be done here. But until/unless + // we support non-default clusters, this is good enough.) + + // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. + cmdConfig := defaultClientConfig() + logrus.Debugf("cmdConfig: %#v", cmdConfig) + restConfig, err := cmdConfig.ClientConfig() + if err != nil { + return nil, err + } + // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) + logrus.Debugf("restConfig: %#v", restConfig) + baseURL, httpClient, err := restClientFor(restConfig) + if err != nil { + return nil, err + } + logrus.Debugf("URL: %#v", *baseURL) + + if httpClient == nil { + httpClient = http.DefaultClient + } + + return &openshiftClient{ + ref: ref, + baseURL: baseURL, + httpClient: httpClient, + bearerToken: restConfig.BearerToken, + username: restConfig.Username, + password: restConfig.Password, + }, nil +} + +// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
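+// A typical call, mirroring getImage below (hypothetical namespace, stream, and digest):
+//
+//	body, err := c.doRequest(ctx, http.MethodGet,
+//		"/oapi/v1/namespaces/myns/imagestreamimages/mystream@sha256:aaaa...", nil)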
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { + url := *c.baseURL + url.Path = path + var requestBodyReader io.Reader + if requestBody != nil { + logrus.Debugf("Will send body: %s", requestBody) + requestBodyReader = bytes.NewReader(requestBody) + } + req, err := http.NewRequestWithContext(ctx, method, url.String(), requestBodyReader) + if err != nil { + return nil, err + } + + if len(c.bearerToken) != 0 { + req.Header.Set("Authorization", "Bearer "+c.bearerToken) + } else if len(c.username) != 0 { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Accept", "application/json, */*") + req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) + if requestBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + logrus.Debugf("%s %s", method, url.String()) + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody) + if err != nil { + return nil, err + } + logrus.Debugf("Got body: %s", body) + // FIXME: Just throwing this useful information away only to try to guess later... + logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) + + var status status + statusValid := false + if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { + statusValid = true + } + + switch { + case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. + if statusValid && status.Status != "Success" { + return nil, errors.New(status.Message) + } + case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: + // OK. + default: + if statusValid { + return nil, errors.New(status.Message) + } + return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + } + + return body, nil +} + +// getImage loads the specified image object. +func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) + body, err := c.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + // Note: This does absolutely no kind/version checking or conversions. + var isi imageStreamImage + if err := json.Unmarshal(body, &isi); err != nil { + return nil, err + } + return &isi.Image, nil +} + +// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; +// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
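+// Sketch (hypothetical addresses): with c.ref.dockerReference pointing at
+// "registry.example.com/myns/mystream", the cluster-internal
+//
+//	convertDockerImageReference("172.30.0.1:5000/myns/mystream@sha256:aaaa...")
+//
+// becomes "registry.example.com/myns/mystream@sha256:aaaa...".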
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { + parts := strings.SplitN(ref, "/", 2) + if len(parts) != 2 { + return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) + } + return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil +} + +type openshiftImageSource struct { + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + return &openshiftImageSource{ + client: client, + sys: sys, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + if s.docker != nil { + err := s.docker.Close() + s.docker = nil + + return err + } + + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
+func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageStreamImageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageStreamImageName = s.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + image, err := s.client.getImage(ctx, imageStreamImageName) + if err != nil { + return nil, err + } + var sigs [][]byte + for _, sig := range image.Signatures { + if sig.Type == imageSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +// ensureImageIsResolved sets up s.docker and s.imageStreamImageName +func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { + if s.docker != nil { + return nil + } + + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) + body, err := s.client.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return err + } + // Note: This does absolutely no kind/version checking or conversions. + var is imageStream + if err := json.Unmarshal(body, &is); err != nil { + return err + } + var te *tagEvent + for _, tag := range is.Status.Tags { + if tag.Tag != s.client.ref.dockerReference.Tag() { + continue + } + if len(tag.Items) > 0 { + te = &tag.Items[0] + break + } + } + if te == nil { + return errors.Errorf("No matching tag found") + } + logrus.Debugf("tag event %#v", te) + dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) + if err != nil { + return err + } + logrus.Debugf("Resolved reference %#v", dockerRefString) + dockerRef, err := docker.ParseReference("//" + dockerRefString) + if err != nil { + return err + } + d, err := dockerRef.NewImageSource(ctx, s.sys) + if err != nil { + return err + } + s.docker = d + s.imageStreamImageName = te.Image + return nil +} + +type openshiftImageDestination struct { + client *openshiftClient + docker types.ImageDestination // The docker/distribution API endpoint + // State + imageStreamImageName string // "" if not yet known +} + +// newImageDestination creates a new ImageDestination for the specified reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, + // i.e. 
a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know + // the manifest digest at this point. + dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) + dockerRef, err := docker.ParseReference(dockerRefString) + if err != nil { + return nil, err + } + docker, err := dockerRef.NewImageDestination(ctx, sys) + if err != nil { + return nil, err + } + + return &openshiftImageDestination{ + client: client, + docker: docker, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *openshiftImageDestination) Reference() types.ImageReference { + return d.client.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *openshiftImageDestination) Close() error { + return d.docker.Close() +} + +func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { + return d.docker.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.docker.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
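+// A minimal sketch (not part of the vendored file) of how a raw manifest turns
+// into the imagestreamimage name that PutManifest records further below;
+// manifestBytes is an invented placeholder for any raw manifest blob.
+//
+//	import "github.com/containers/image/v5/manifest"
+//
+//	dgst, err := manifest.Digest(manifestBytes)
+//	if err != nil {
+//		return err
+//	}
+//	name := dgst.String() // e.g. "sha256:...", kept as d.imageStreamImageName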
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig) +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute) +} + +// PutManifest writes manifest to the destination. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. +func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest == nil { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return err + } + d.imageStreamImageName = manifestDigest.String() + } + return d.docker.PutManifest(ctx, m, instanceDigest) +} + +func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + var imageStreamImageName string + if instanceDigest == nil { + if d.imageStreamImageName == "" { + return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures") + } + imageStreamImageName = d.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + + // Because image signatures are a shared resource in Atomic Registry, the default upload + // always adds signatures. Eventually we should also allow removing signatures. + + if len(signatures) == 0 { + return nil // No need to even read the old state. + } + + image, err := d.client.getImage(ctx, imageStreamImageName) + if err != nil { + return err + } + existingSigNames := map[string]struct{}{} + for _, sig := range image.Signatures { + existingSigNames[sig.objectMeta.Name] = struct{}{} + } + +sigExists: + for _, newSig := range signatures { + for _, existingSig := range image.Signatures { + if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { + continue sigExists + } + } + + // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
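+	// A minimal sketch of one iteration of the naming loop below, in isolation:
+	//
+	//	randBytes := make([]byte, 16)
+	//	if _, err := rand.Read(randBytes); err != nil {
+	//		return err
+	//	}
+	//	name := fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
+	//
+	// 16 random bytes render as exactly 32 hex digits after the "@".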
+ var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return errors.Wrapf(err, "generating random signature len %d", n) + } + signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) + if _, ok := existingSigNames[signatureName]; !ok { + break + } + } + // Note: This does absolutely no kind/version checking or conversions. + sig := imageSignature{ + typeMeta: typeMeta{ + Kind: "ImageSignature", + APIVersion: "v1", + }, + objectMeta: objectMeta{Name: signatureName}, + Type: imageSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + if err != nil { + return err + } + _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body) + if err != nil { + return err + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.docker.Commit(ctx, unparsedToplevel) +} + +// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. +type imageStream struct { + Status imageStreamStatus `json:"status,omitempty"` +} +type imageStreamStatus struct { + DockerImageRepository string `json:"dockerImageRepository"` + Tags []namedTagEventList `json:"tags,omitempty"` +} +type namedTagEventList struct { + Tag string `json:"tag"` + Items []tagEvent `json:"items"` +} +type tagEvent struct { + DockerImageReference string `json:"dockerImageReference"` + Image string `json:"image"` +} +type imageStreamImage struct { + Image image `json:"image"` +} +type image struct { + objectMeta `json:"metadata,omitempty"` + DockerImageReference string `json:"dockerImageReference,omitempty"` + // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` + Signatures []imageSignature `json:"signatures,omitempty"` +} + +const imageSignatureTypeAtomic string = "atomic" + +type imageSignature struct { + typeMeta `json:",inline"` + objectMeta `json:"metadata,omitempty"` + Type string `json:"type"` + Content []byte `json:"content"` + // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ImageIdentity string `json:"imageIdentity,omitempty"` + // SignedClaims map[string]string `json:"signedClaims,omitempty"` + // Created *unversioned.Time `json:"created,omitempty"` + // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` + // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` +} +type typeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} +type objectMeta struct { + 
Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + Namespace string `json:"namespace,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Generation int64 `json:"generation,omitempty"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status +type status struct { + Status string `json:"status,omitempty"` + Message string `json:"message,omitempty"` + // Reason StatusReason `json:"reason,omitempty"` + // Details *StatusDetails `json:"details,omitempty"` + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go new file mode 100644 index 00000000000..6bbb43be283 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -0,0 +1,157 @@ +package openshift + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + genericImage "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for OpenShift registry-hosted images. +var Transport = openshiftTransport{} + +type openshiftTransport struct{} + +func (t openshiftTransport) Name() string { + return "atomic" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// While an image name is namespace/stream:tag, a policy scope for this +// transport is HOSTNAME/namespace/stream:tag or any parent prefix of it. +// Keep scopeRegexp in sync with that format! +var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") + +// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { + if scopeRegexp.FindStringIndex(scope) == nil { + return errors.Errorf("Invalid scope name %s", scope) + } + return nil +} + +// openshiftReference is an ImageReference for OpenShift images. +type openshiftReference struct { + dockerReference reference.NamedTagged + namespace string // Computed from dockerReference in advance. + stream string // Computed from dockerReference in advance. +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
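+// A minimal sketch (not part of the vendored file) of parsing an "atomic:"
+// name with this package; the registry host, namespace and stream are invented.
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/containers/image/v5/openshift"
+//	)
+//
+//	func main() {
+//		ref, err := openshift.ParseReference("registry.example.com/myns/mystream:latest")
+//		if err != nil {
+//			panic(err)
+//		}
+//		fmt.Println(ref.Transport().Name())      // "atomic"
+//		fmt.Println(ref.StringWithinTransport()) // "registry.example.com/myns/mystream:latest"
+//	}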
+func ParseReference(ref string) (types.ImageReference, error) { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) + } + tagged, ok := r.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + } + return NewReference(tagged) +} + +// NewReference returns an OpenShift reference for a reference.NamedTagged +func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { + r := strings.SplitN(reference.Path(dockerRef), "/", 3) + if len(r) != 2 { + return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + reference.FamiliarString(dockerRef)) + } + return openshiftReference{ + namespace: r[0], + stream: r[1], + dockerReference: dockerRef, + }, nil +} + +func (ref openshiftReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref openshiftReference) StringWithinTransport() string { + return reference.FamiliarString(ref.dockerReference) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref openshiftReference) DockerReference() reference.Named { + return ref.dockerReference +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref openshiftReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) + if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. 
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref openshiftReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(sys, ref) + if err != nil { + return nil, err + } + return genericImage.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for atomic: images") +} diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go new file mode 100644 index 00000000000..3eb2a2cba22 --- /dev/null +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -0,0 +1,519 @@ +//go:build containers_image_ostree +// +build containers_image_ostree + +package ostree + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/klauspost/pgzip" + "github.com/opencontainers/go-digest" + selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/ostreedev/ostree-go/pkg/otbuiltin" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux +// #include <glib.h> +// #include <glib-object.h> +// #include <gio/gio.h> +// #include <stdlib.h> +// #include <ostree.h> +// #include <gio/ginputstream.h> +// #include <selinux/selinux.h> +// #include <selinux/label.h> +import "C" + +type blobToImport struct { + Size int64 + Digest digest.Digest + BlobPath string +} + +type descriptor struct { + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` +} + +type fsLayersSchema1 struct { + BlobSum digest.Digest `json:"blobSum"` +} + +type manifestSchema struct { + LayersDescriptors []descriptor `json:"layers"` + FSLayers []fsLayersSchema1 `json:"fsLayers"` +} + +type ostreeImageDestination struct { + ref ostreeReference + manifest string + schema manifestSchema + tmpDirPath string + blobs map[string]*blobToImport + digest digest.Digest + signaturesLen int + repo *C.struct_OstreeRepo +} + +// newImageDestination returns an ImageDestination for writing to an existing ostree repository. +func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) { + tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) + if err := ensureDirectoryExists(tmpDirPath); err != nil { + return nil, err + } + return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ostreeImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ostreeImageDestination) Close() error { + if d.repo != nil { + C.g_object_unref(C.gpointer(d.repo)) + } + return os.RemoveAll(d.tmpDirPath) +} + +func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + manifest.DockerV2Schema2MediaType, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// DesiredLayerCompression indicates the kind of compression to apply to layer blobs written to this destination. +func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly.
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") + if err != nil { + return types.BlobInfo{}, err + } + + blobPath := filepath.Join(tmpDir, "content") + blobFile, err := os.Create(blobPath) + if err != nil { + return types.BlobInfo{}, err + } + defer blobFile.Close() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return types.BlobInfo{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + hash := blobDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} + return types.BlobInfo{Digest: blobDigest, Size: size}, nil +} + +func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { + entries, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + + for _, info := range entries { + fullpath := filepath.Join(dir, info.Name()) + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if err := os.Remove(fullpath); err != nil { + return err + } + continue + } + + if selinuxHnd != nil { + relPath, err := filepath.Rel(root, fullpath) + if err != nil { + return err + } + // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, + // thus we benefit from maintaining the same SELinux label they would have on the host as we could + // use hard links instead of copying the files. 
+ relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) + + relPathC := C.CString(relPath) + defer C.free(unsafe.Pointer(relPathC)) + var context *C.char + + res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) + if int(res) < 0 && err != syscall.ENOENT { + return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) + } + if int(res) == 0 { + defer C.freecon(context) + fullpathC := C.CString(fullpath) + defer C.free(unsafe.Pointer(fullpathC)) + res, err = C.lsetfilecon_raw(fullpathC, context) + if int(res) < 0 { + return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context)) + } + } + } + + if info.IsDir() { + if usermode { + if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { + return err + } + } + err = fixFiles(selinuxHnd, root, fullpath, usermode) + if err != nil { + return err + } + } else if usermode && (info.Mode().IsRegular()) { + if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { + return err + } + } + } + + return nil +} + +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { + mfz := pgzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", -1, err + } + defer stream.Close() + + gzReader, err := archive.DecompressStream(stream) + if err != nil { + return "", -1, err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return "", -1, err + } + + digester := digest.Canonical.Digester() + + written, err := io.Copy(digester.Hash(), its) + if err != nil { + return "", -1, err + } + + return digester.Digest(), written, nil +} + +func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
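+	// Summary of the per-layer OSTree commit this function produces, as written
+	// by the ostreeCommit call at the end (branch and metadata keys from the
+	// code below):
+	//
+	//	branch: ociimage/<layer digest hex>
+	//	  docker.size                - compressed blob size in bytes
+	//	  docker.uncompressed_size   - size of the uncompressed tar stream
+	//	  docker.uncompressed_digest - digest of the uncompressed tar stream
+	//	  tarsplit.output            - base64-encoded, pgzip-compressed tar-split
+	//	                               data used to reassemble the original layer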
+ + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") + if err := ensureDirectoryExists(destinationPath); err != nil { + return err + } + defer func() { + os.Remove(blob.BlobPath) + os.RemoveAll(destinationPath) + }() + + var tarSplitOutput bytes.Buffer + uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath) + if err != nil { + return err + } + + if os.Getuid() == 0 { + if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { + return err + } + if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil { + return err + } + } else { + os.MkdirAll(destinationPath, 0755) + if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil { + return err + } + + if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil { + return err + } + } + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), + fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize), + fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()), + fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) + +} + +func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Dir(blob.BlobPath) + + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache.
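+// Note on the reuse check below: a blob counts as already present iff its
+// ociimage/<hex> branch exists and carries all three metadata keys written at
+// import time (docker.uncompressed_digest, docker.uncompressed_size and
+// docker.size); the returned size is parsed from the docker.size value.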
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if d.repo == nil { + repo, err := openRepo(d.ref.repo) + if err != nil { + return false, types.BlobInfo{}, err + } + d.repo = repo + } + branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) + + found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") + if err != nil || !found { + return found, types.BlobInfo{}, err + } + + found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size") + if err != nil || !found { + return found, types.BlobInfo{}, err + } + + found, data, err = readMetadata(d.repo, branch, "docker.size") + if err != nil || !found { + return found, types.BlobInfo{}, err + } + + size, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return false, types.BlobInfo{}, err + } + + return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil +} + +// PutManifest writes manifest to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. +func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported by "ostree:"`) + } + + d.manifest = string(manifestBlob) + + if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { + return err + } + + manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath()) + if err := ensureParentDirectoryExists(manifestPath); err != nil { + return err + } + + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + d.digest = digest + + return ioutil.WriteFile(manifestPath, manifestBlob, 0644) +} + +// PutSignatures writes signatures to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests.
+func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported by "ostree:"`) + } + + path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) + if err := ensureParentDirectoryExists(path); err != nil { + return err + } + + for i, sig := range signatures { + signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) + if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { + return err + } + } + d.signaturesLen = len(signatures) + return nil +} + +func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + repo, err := otbuiltin.OpenRepo(d.ref.repo) + if err != nil { + return err + } + + _, err = repo.PrepareTransaction() + if err != nil { + return err + } + + var selinuxHnd *C.struct_selabel_handle + + if os.Getuid() == 0 && selinux.GetEnabled() { + selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) + if selinuxHnd == nil { + return errors.Wrapf(err, "cannot open the SELinux DB") + } + + defer C.selabel_close(selinuxHnd) + } + + checkLayer := func(hash string) error { + blob := d.blobs[hash] + // if the blob is not present in d.blobs then it is already stored in OSTree, + // and we don't need to import it. + if blob == nil { + return nil + } + err := d.importBlob(selinuxHnd, repo, blob) + if err != nil { + return err + } + + delete(d.blobs, hash) + return nil + } + for _, layer := range d.schema.LayersDescriptors { + hash := layer.Digest.Hex() + if err = checkLayer(hash); err != nil { + return err + } + } + for _, layer := range d.schema.FSLayers { + hash := layer.BlobSum.Hex() + if err = checkLayer(hash); err != nil { + return err + } + } + + // Import the other blobs that are not layers + for _, blob := range d.blobs { + err := d.importConfig(repo, blob) + if err != nil { + return err + } + } + + manifestPath := filepath.Join(d.tmpDirPath, "manifest") + + metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), + fmt.Sprintf("signatures=%d", d.signaturesLen), + fmt.Sprintf("docker.digest=%s", string(d.digest))} + if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil { + return err + } + + _, err = repo.CommitTransaction() + return err +} + +func ensureDirectoryExists(path string) error { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go new file mode 100644 index 00000000000..d30c764a630 --- /dev/null +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -0,0 +1,431 @@ +//go:build containers_image_ostree +// +build containers_image_ostree + +package ostree + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "unsafe" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + glib "github.com/ostreedev/ostree-go/pkg/glibobject" + "github.com/pkg/errors" + 
"github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +type ostreeImageSource struct { + ref ostreeReference + tmpDir string + repo *C.struct_OstreeRepo + // get the compressed layer by its uncompressed checksum + compressed map[digest.Digest]digest.Digest +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) { + return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil +} + +// Reference returns the reference used to set up this source. +func (s *ostreeImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) { + var metadataKey string + if isCompressed { + metadataKey = "docker.uncompressed_size" + } else { + metadataKey = "docker.size" + } + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, metadataKey) + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be non-default instances. 
+func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + copy(p, data) + return count, nil +} + +func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { + var cerr *C.GError + var ref *C.char + defer C.free(unsafe.Pointer(ref)) + + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + if ref == nil { + return false, "", nil + } + + var variant *C.GVariant + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_variant_unref(variant) + if variant != nil { + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + metadata := C.g_variant_get_child_value(variant, 0) + defer C.g_variant_unref(metadata) + + data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) + if data != nil { + defer C.g_variant_unref(data) + ptr := (*C.char)(C.g_variant_get_string(data, nil)) + val := C.GoString(ptr) + return true, val, nil + } + } + return false, "", nil +} + +func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { + var cerr *C.GError + var parentRoot *C.GFile + cCommit := 
C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { + return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + C.g_object_ref(C.gpointer(repo)) + + return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil +} + +func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { + var file *C.GFile + if strings.HasPrefix(filename, "./") { + filename = filename[2:] + } + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + + file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) + + var cerr *C.GError + stream := C.g_file_read(file, nil, &cerr) + if stream == nil { + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + return &ostreeReader{stream: stream}, nil +} + +func (o ostreePathFileGetter) Close() { + C.g_object_unref(C.gpointer(o.repo)) + C.g_object_unref(C.gpointer(o.parentRoot)) +} + +func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { + getter, err := newOSTreePathFileGetter(s.repo, commit) + if err != nil { + return nil, err + } + defer getter.Close() + + return getter.Get(path) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ostreeImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + + blob := info.Digest.Hex() + + // Ensure s.compressed is initialized. It is built by LayerInfosForCopy. + if s.compressed == nil { + _, err := s.LayerInfosForCopy(ctx, nil) + if err != nil { + return nil, -1, err + } + + } + compressedBlob, isCompressed := s.compressed[info.Digest] + if isCompressed { + blob = compressedBlob.Hex() + } + branch := fmt.Sprintf("ociimage/%s", blob) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, 0, err + } + s.repo = repo + } + + layerSize, err := s.getBlobUncompressedSize(blob, isCompressed) + if err != nil { + return nil, 0, err + } + + tarsplit, err := s.getTarSplitData(blob) + if err != nil { + return nil, 0, err + } + + // if tarsplit is nil we are looking at the manifest; return the file in /content directly + if tarsplit == nil { + file, err := s.readSingleFile(branch, "/content") + if err != nil { + return nil, 0, err + } + return file, layerSize, nil + } + + mf := bytes.NewReader(tarsplit) + mfz, err := pgzip.NewReader(mf) + if err != nil { + return nil, 0, err + } + metaUnpacker := storage.NewJSONUnpacker(mfz) + + getter, err := newOSTreePathFileGetter(s.repo, branch) + if err != nil { + mfz.Close() + return nil, 0, err + } + + ots := asm.NewOutputTarStream(getter, metaUnpacker) + + rc := ioutils.NewReadCloserWrapper(ots, func() error { + getter.Close() + mfz.Close() + return ots.Close() + }) + return rc, layerSize, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New(`Manifest lists are not supported by "ostree:"`) + } + lenSignatures, err := s.getLenSignatures() + if err != nil { + return nil, err + } + branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, err + } + s.repo = repo + } + + signatures := [][]byte{} + for i := int64(1); i <= lenSignatures; i++ { + sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) + if err != nil { + return nil, err + } + defer sigReader.Close() + + sig, err := ioutil.ReadAll(sigReader) + if err != nil { + return nil, err + } + signatures = append(signatures, sig) + } + return signatures, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary manifests. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + if instanceDigest != nil { + return nil, errors.New(`Manifest lists are not supported by "ostree:"`) + } + + updatedBlobInfos := []types.BlobInfo{} + manifestBlob, manifestType, err := s.GetManifest(ctx, nil) + if err != nil { + return nil, err + } + + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, err + } + + s.compressed = make(map[digest.Digest]digest.Digest) + + layerBlobs := man.LayerInfos() + + for _, layerBlob := range layerBlobs { + branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex()) + found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest") + if err != nil || !found { + return nil, err + } + + found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size") + if err != nil || !found { + return nil, err + } + + uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64) + if err != nil { + return nil, err + } + uncompressedDigest := digest.Digest(uncompressedDigestStr) + blobInfo := types.BlobInfo{ + Digest: uncompressedDigest, + Size: uncompressedSize, + MediaType: layerBlob.MediaType, + } + s.compressed[uncompressedDigest] = layerBlob.Digest + updatedBlobInfos = append(updatedBlobInfos, blobInfo) + } + return updatedBlobInfos, nil +} diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go new file mode 100644 index 00000000000..1e35ab6059f --- /dev/null +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -0,0 +1,253 @@ +//go:build containers_image_ostree +// +build containers_image_ostree + +package ostree + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + +
"github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +const defaultOSTreeRepo = "/ostree/repo" + +// Transport is an ImageTransport for ostree paths. +var Transport = ostreeTransport{} + +type ostreeTransport struct{} + +func (t ostreeTransport) Name() string { + return "ostree" +} + +func init() { + transports.Register(Transport) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { + sep := strings.Index(scope, ":") + if sep < 0 { + return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope) + } + repo := scope[:sep] + + if !strings.HasPrefix(repo, "/") { + return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) + } + cleaned := filepath.Clean(repo) + if cleaned != repo { + return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + // FIXME? In the namespaces within a repo, + // we could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// ostreeReference is an ImageReference for ostree paths. +type ostreeReference struct { + image string + branchName string + repo string +} + +type ostreeImageCloser struct { + types.ImageCloser + size int64 +} + +func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { + var repo = "" + var image = "" + s := strings.SplitN(ref, "@/", 2) + if len(s) == 1 { + image, repo = s[0], defaultOSTreeRepo + } else { + image, repo = s[0], "/"+s[1] + } + + return NewReference(image, repo) +} + +// NewReference returns an OSTree reference for a specified repo and image. +func NewReference(image string, repo string) (types.ImageReference, error) { + // image is not _really_ in a containers/image/docker/reference format; + // as far as the libOSTree ociimage/* namespace is concerned, it is more or + // less an arbitrary string with an implied tag. + // Parse the image using reference.ParseNormalizedNamed so that we can + // check whether the images has a tag specified and we can add ":latest" if needed + ostreeImage, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if reference.IsNameOnly(ostreeImage) { + image = image + ":latest" + } + + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo) + if err != nil { + // With os.IsNotExist(err), the parent directory of repo is also not existent; + // that should ordinarily not happen, but it would be a bit weird to reject + // references which do not specify a repo just because the implicit defaultOSTreeRepo + // does not exist. 
+ if os.IsNotExist(err) && repo == defaultOSTreeRepo { + resolved = repo + } else { + return nil, err + } + } + // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces + // from being ambiguous with values of PolicyConfigurationIdentity. + if strings.Contains(resolved, ":") { + return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) + } + + return ostreeReference{ + image: image, + branchName: encodeOStreeRef(image), + repo: resolved, + }, nil +} + +func (ref ostreeReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ostreeReference) StringWithinTransport() string { + return fmt.Sprintf("%s@%s", ref.image, ref.repo) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref ostreeReference) DockerReference() reference.Named { + return nil +} + +func (ref ostreeReference) PolicyConfigurationIdentity() string { + return fmt.Sprintf("%s:%s", ref.repo, ref.image) +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref ostreeReference) PolicyConfigurationNamespaces() []string { + s := strings.SplitN(ref.image, ":", 2) + if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. + panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) + } + name := s[0] + res := []string{} + for { + res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + return res +} + +func (s *ostreeImageCloser) Size() (int64, error) { + return s.size, nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. 
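PolicyConfigurationNamespaces above walks the image name one path component at a time, so a policy can be scoped at any prefix of the repository path. A standalone sketch of the same loop, with hypothetical example values:

package main

import (
	"fmt"
	"strings"
)

// namespaces mirrors the prefix walk in PolicyConfigurationNamespaces:
// drop the tag, then strip one path component per iteration.
func namespaces(repo, image string) []string {
	name := strings.SplitN(image, ":", 2)[0] // image is assumed to carry a :tag
	res := []string{}
	for {
		res = append(res, fmt.Sprintf("%s:%s", repo, name))
		i := strings.LastIndex(name, "/")
		if i == -1 {
			break
		}
		name = name[:i]
	}
	return res
}

func main() {
	fmt.Println(namespaces("/ostree/repo", "example.com/ns/app:v1"))
	// [/ostree/repo:example.com/ns/app /ostree/repo:example.com/ns /ostree/repo:example.com]
}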
+func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + var tmpDir string + if sys == nil || sys.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = sys.OSTreeTmpDirPath + } + src, err := newImageSource(tmpDir, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + var tmpDir string + if sys == nil || sys.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = sys.OSTreeTmpDirPath + } + return newImageSource(tmpDir, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + var tmpDir string + if sys == nil || sys.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = sys.OSTreeTmpDirPath + } + return newImageDestination(ref, tmpDir) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for ostree: images") +} + +var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`) + +func encodeOStreeRef(in string) string { + var buffer bytes.Buffer + for i := range in { + sub := in[i : i+1] + if ostreeRefRegexp.MatchString(sub) { + buffer.WriteString(sub) + } else { + buffer.WriteString(fmt.Sprintf("_%02X", sub[0])) + } + + } + return buffer.String() +} + +// manifestPath returns a path for the manifest within a ostree using our conventions. +func (ref ostreeReference) manifestPath() string { + return filepath.Join("manifest", "manifest.json") +} + +// signaturePath returns a path for a signature within a ostree using our conventions. +func (ref ostreeReference) signaturePath(index int) string { + return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go new file mode 100644 index 00000000000..a472efd95bf --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go @@ -0,0 +1,393 @@ +// Package boltdb implements a BlobInfoCache backed by BoltDB. +package boltdb + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" +) + +var ( + // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade + // we can simply start over with a different filename; update blobInfoCacheFilename. + + // FIXME: For CRI-O, does this need to hide information between different users? + + // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. 
+	uncompressedDigestBucket = []byte("uncompressedDigest")
+	// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
+	// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
+	digestCompressorBucket = []byte("digestCompressor")
+	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+	// (as a set of key=digest, value="" pairs)
+	digestByUncompressedBucket = []byte("digestByUncompressed")
+	// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+	// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+	knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+	refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+	mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+	// pathLocks contains a lock for each currently open file.
+	// This must be global so that independently created instances of boltDBCache exclude each other.
+	// The map is protected by pathLocksMutex.
+	// FIXME? Should this be based on device:inode numbers instead of paths?
+	pathLocks      = map[string]*pathLock{}
+	pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+	pl := func() *pathLock { // A scope for defer
+		pathLocksMutex.Lock()
+		defer pathLocksMutex.Unlock()
+		pl, ok := pathLocks[path]
+		if ok {
+			pl.refCount++
+		} else {
+			pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+			pathLocks[path] = pl
+		}
+		return pl
+	}()
+	pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+	pathLocksMutex.Lock()
+	defer pathLocksMutex.Unlock()
+	pl, ok := pathLocks[path]
+	if !ok {
+		// Should this return an error instead? BlobInfoCache ultimately ignores errors…
+		panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+	}
+	pl.mutex.Unlock()
+	pl.refCount--
+	if pl.refCount == 0 {
+		delete(pathLocks, path)
+	}
+}
+
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type cache struct {
+	path string
+}
+
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) types.BlobInfoCache {
+	return new2(path)
+}
+func new2(path string) *cache {
+	return &cache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+	// a read lock, blocking any future writes.
+	// Hence this preliminary check, which is RACY: Another process could remove the file
+	// between the Lstat call and opening the database.
+	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+		return err
+	}
+
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, nil)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+			d, err := digest.Parse(string(uncompressedBytes))
+			if err == nil {
+				return d
+			}
+			// FIXME? Log err (but throttle the log volume on repeated accesses)?
+		}
+	}
+	// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+		if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+			c := b.Cursor()
+			if k, _ := c.First(); k != nil { // The bucket is non-empty
+				return anyDigest
+			}
+		}
+	}
+	return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	var res digest.Digest
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		res = bdc.uncompressedDigest(tx, anyDigest)
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
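The pair-recording API is easiest to see against the in-memory cache implementation that appears later in this patch; a minimal sketch, with stand-in digests and canonical github.com import paths:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New()
	// Stand-in digests; in real use these come from locally verified blobs.
	compressed := digest.FromString("gzipped layer bytes")
	uncompressed := digest.FromString("raw layer bytes")

	cache.RecordDigestUncompressedPair(compressed, uncompressed)

	// A lookup by the compressed digest now resolves to the uncompressed one.
	fmt.Println(cache.UncompressedDigest(compressed) == uncompressed) // true
}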
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) + if err != nil { + return err + } + key := []byte(anyDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + previous, err := digest.Parse(string(previousBytes)) + if err != nil { + return err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + } + if err := b.Put(key, []byte(uncompressed.String())); err != nil { + return err + } + + b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) + if err != nil { + return err + } + if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified +// compressor, or is blobinfocache.Uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) + if err != nil { + return err + } + key := []byte(anyDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + if string(previousBytes) != compressorName { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName) + } + } + if compressorName == blobinfocache.UnknownCompression { + return b.Delete(key) + } + return b.Put(key, []byte(compressorName)) + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) + if err != nil { + return err + } + value, err := time.Now().MarshalBinary() + if err != nil { + return err + } + if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? 
+} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates. +func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { + digestKey := []byte(digest.String()) + b := scopeBucket.Bucket(digestKey) + if b == nil { + return candidates + } + compressorName := blobinfocache.UnknownCompression + if compressionBucket != nil { + // the bucket won't exist if the cache was created by a v1 implementation and + // hasn't yet been updated by a v2 implementation + if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 { + compressorName = string(compressorNameValue) + } + } + if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo { + return candidates + } + _ = b.ForEach(func(k, v []byte) error { + t := time.Time{} + if err := t.UnmarshalBinary(v); err != nil { + return err + } + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: types.BICLocationReference{Opaque: string(k)}, + }, + LastSeen: t, + }) + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? + return candidates +} + +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { + res := []prioritize.CandidateWithTime{} + var uncompressedDigestValue digest.Digest // = "" + if err := bdc.view(func(tx *bolt.Tx) error { + scopeBucket := tx.Bucket(knownLocationsBucket) + if scopeBucket == nil { + return nil + } + scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) + if scopeBucket == nil { + return nil + } + scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) + if scopeBucket == nil { + return nil + } + // compressionBucket won't have been created if previous writers never recorded info about compression, + // and we don't want to fail just because of that + compressionBucket := tx.Bucket(digestCompressorBucket) + + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo) + if canSubstitute { + if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { + b := tx.Bucket(digestByUncompressedBucket) + if b != nil { + b = b.Bucket([]byte(uncompressedDigestValue.String())) + if b != nil { + if err := b.ForEach(func(k, _ []byte) error { + d, err := digest.Parse(string(k)) + if err != nil { + return err + } + if d != primaryDigest && d != uncompressedDigestValue { + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo) + } + return nil + }); err != nil { + return err + } + } + } + if uncompressedDigestValue != primaryDigest { + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo) + } + } + } + return nil + }); err != nil { // Including os.IsNotExist(err) + return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go new file mode 100644 index 00000000000..83034b618d9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go @@ -0,0 +1,66 @@ +package blobinfocache + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/internal/rootless" + "github.com/containers/image/v5/pkg/blobinfocache/boltdb" + "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +const ( + // blobInfoCacheFilename is the file name used for blob info caches. + // If the format changes in an incompatible way, increase the version number. + blobInfoCacheFilename = "blob-info-cache-v1.boltdb" + // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes. + systemBlobInfoCacheDir = "/var/lib/containers/cache" +) + +// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid. +// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory. +func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) { + if sys != nil && sys.BlobInfoCacheDir != "" { + return sys.BlobInfoCacheDir, nil + } + + // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged + // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe. + if euid == 0 { + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil + } + return systemBlobInfoCacheDir, nil + } + + // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts. + dataDir := os.Getenv("XDG_DATA_HOME") + if dataDir == "" { + home := os.Getenv("HOME") + if home == "" { + return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty") + } + dataDir = filepath.Join(home, ".local", "share") + } + return filepath.Join(dataDir, "containers", "cache"), nil +} + +// DefaultCache returns the default BlobInfoCache implementation appropriate for sys. 
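A minimal sketch of the intended call site (sys may be nil, in which case the defaults above apply; canonical github.com import path):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache"
)

func main() {
	// With a nil SystemContext, the cache lands in /var/lib/containers/cache
	// for root, or under $XDG_DATA_HOME (or ~/.local/share) otherwise;
	// if no usable directory is found, a memory-only cache is returned.
	cache := blobinfocache.DefaultCache(nil)
	fmt.Printf("%T\n", cache)
}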
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { + dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID()) + if err != nil { + logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) + return memory.New() + } + path := filepath.Join(dir, blobInfoCacheFilename) + if err := os.MkdirAll(dir, 0700); err != nil { + logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) + return memory.New() + } + + logrus.Debugf("Using blob info cache at %s", path) + return boltdb.New(path) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go new file mode 100644 index 00000000000..6f5506d94c1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -0,0 +1,110 @@ +// Package prioritize provides utilities for prioritizing locations in +// types.BlobInfoCache.CandidateLocations. +package prioritize + +import ( + "sort" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/opencontainers/go-digest" +) + +// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by types.BlobInfoCache.CandidateLocations. +// This is a heuristic/guess, and could well use a different value. +const replacementAttempts = 5 + +// CandidateWithTime is the input to types.BICReplacementCandidate prioritization. +type CandidateWithTime struct { + Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) +} + +// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, +// along with the specially-treated digest values for the implementation of sort.Interface.Less +type candidateSortState struct { + cs []CandidateWithTime // The entries to sort + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) Len() int { + return len(css.cs) +} + +func (css *candidateSortState) Less(i, j int) bool { + xi := css.cs[i] + xj := css.cs[j] + + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. 
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.Candidate.Digest != xj.Candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.Candidate.Digest == css.primaryDigest { + return true + } + if xj.Candidate.Digest == css.primaryDigest { + return false + } + if css.uncompressedDigest != "" { + if xi.Candidate.Digest == css.uncompressedDigest { + return false + } + if xj.Candidate.Digest == css.uncompressedDigest { + return true + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { + return xi.LastSeen.After(xj.LastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time + return xi.LastSeen.After(xj.LastSeen) + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return xi.Candidate.Digest < xj.Candidate.Digest +} + +func (css *candidateSortState) Swap(i, j int) { + css.cs[i], css.cs[j] = css.cs[j], css.cs[i] +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the +// number of entries to limit, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. + sort.Sort(&candidateSortState{ + cs: cs, + primaryDigest: primaryDigest, + uncompressedDigest: uncompressedDigest, + }) + + resLength := len(cs) + if resLength > maxCandidates { + resLength = maxCandidates + } + res := make([]blobinfocache.BICReplacementCandidate2, resLength) + for i := range res { + res[i] = cs[i].Candidate + } + return res +} + +// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, +// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), +// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. +// +// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course +// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
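For the common case the rules above reduce to: candidates for the requested digest first, everything else newest-first. A simplified standalone illustration of that ordering (not the internal API, just the idea):

package main

import (
	"fmt"
	"sort"
	"time"
)

type candidate struct {
	digest   string
	lastSeen time.Time
}

// prioritize sorts candidates roughly the way the cache does: entries for the
// digest the caller asked for come first, everything else by recency.
func prioritize(cs []candidate, primary string) {
	sort.SliceStable(cs, func(i, j int) bool {
		if (cs[i].digest == primary) != (cs[j].digest == primary) {
			return cs[i].digest == primary
		}
		return cs[i].lastSeen.After(cs[j].lastSeen)
	})
}

func main() {
	now := time.Now()
	cs := []candidate{
		{"sha256:aaa", now.Add(-time.Hour)},
		{"sha256:bbb", now},
		{"sha256:aaa", now.Add(-time.Minute)},
	}
	prioritize(cs, "sha256:aaa")
	for _, c := range cs {
		fmt.Println(c.digest, c.lastSeen.Format(time.Kitchen))
	}
}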
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 { + return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go new file mode 100644 index 00000000000..426640366fc --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -0,0 +1,186 @@ +// Package memory implements an in-memory BlobInfoCache. +package memory + +import ( + "sync" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// locationKey only exists to make lookup in knownLocations easier. +type locationKey struct { + transport string + scope types.BICTransportScope + blobDigest digest.Digest +} + +// cache implements an in-memory-only BlobInfoCache +type cache struct { + mutex sync.Mutex + // The following fields can only be accessed with mutex held. + uncompressedDigests map[digest.Digest]digest.Digest + digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest + knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference + compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest +} + +// New returns a BlobInfoCache implementation which is in-memory only. +// +// This is primarily intended for tests, but also used as a fallback +// if blobinfocache.DefaultCache can’t determine, or set up, the +// location for a persistent cache. Most users should use +// blobinfocache.DefaultCache. instead of calling this directly. +// Manual users of types.{ImageSource,ImageDestination} might also use +// this instead of a persistent cache. +func New() types.BlobInfoCache { + return new2() +} + +func new2() *cache { + return &cache{ + uncompressedDigests: map[digest.Digest]digest.Digest{}, + digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, + knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + compressors: map[digest.Digest]string{}, + } +} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + mem.mutex.Lock() + defer mem.mutex.Unlock() + return mem.uncompressedDigestLocked(anyDigest) +} + +// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held. +func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest { + if d, ok := mem.uncompressedDigests[anyDigest]; ok { + return d + } + // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. + // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. 
+ if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { + return anyDigest + } + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + mem.uncompressedDigests[anyDigest] = uncompressed + + anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] + if !ok { + anyDigestSet = map[digest.Digest]struct{}{} + mem.digestsByUncompressed[uncompressed] = anyDigestSet + } + anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} + locationScope, ok := mem.knownLocations[key] + if !ok { + locationScope = map[types.BICLocationReference]time.Time{} + mem.knownLocations[key] = locationScope + } + locationScope[location] = time.Now() // Possibly overwriting an older entry. +} + +// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified +// algorithm, or uncompressed, or that we no longer know. +func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if compressorName == blobinfocache.UnknownCompression { + delete(mem.compressors, blobDigest) + return + } + mem.compressors[blobDigest] = compressorName +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. 
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { + locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present + for l, t := range locations { + compressorName, compressorKnown := mem.compressors[digest] + if !compressorKnown { + if requireCompressionInfo { + continue + } + compressorName = blobinfocache.UnknownCompression + } + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: l, + }, + LastSeen: t, + }) + } + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) +} + +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { + mem.mutex.Lock() + defer mem.mutex.Unlock() + res := []prioritize.CandidateWithTime{} + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) + var uncompressedDigest digest.Digest // = "" + if canSubstitute { + if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { + otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map + for d := range otherDigests { + if d != primaryDigest && d != uncompressedDigest { + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + } + } + if uncompressedDigest != primaryDigest { + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) + } + } + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go new file mode 100644 index 00000000000..4b7122f9285 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -0,0 +1,50 @@ +// Package none implements a dummy BlobInfoCache which records no data. +package none + +import ( + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// noCache implements a dummy BlobInfoCache which records no data. +type noCache struct { +} + +// NoCache implements BlobInfoCache by not recording any data. +// +// This exists primarily for implementations of configGetter for +// Manifest.Inspect, because configs only have one representation. +// Any use of BlobInfoCache with blobs should usually use at least a +// short-lived cache, ideally blobinfocache.DefaultCache. +var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
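A minimal sketch of what the no-op behaviour looks like to a caller (canonical github.com import paths):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/opencontainers/go-digest"
)

func main() {
	// none.NoCache records nothing and answers every query with "unknown".
	d := digest.FromString("some blob")
	fmt.Println(none.NoCache.UncompressedDigest(d) == "") // true
	none.NoCache.RecordDigestUncompressedPair(d, d)       // a no-op
	fmt.Println(none.NoCache.UncompressedDigest(d) == "") // still true
}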
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go new file mode 100644 index 00000000000..c28e8179296 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -0,0 +1,167 @@ +package compression + +import ( + "bytes" + "compress/bzip2" + "fmt" + "io" + "io/ioutil" + + "github.com/containers/image/v5/pkg/compression/internal" + "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/klauspost/pgzip" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm = types.Algorithm + +var ( + // Gzip compression. + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName, + []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) + // Bzip2 compression. + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName, + []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) + // Xz compression. + Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName, + []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) + // Zstd compression. + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, + []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) + // Zstd:chunked compression. + ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ + nil, ZstdDecompressor, compressor.ZstdCompressor) + + compressionAlgorithms = map[string]Algorithm{ + Gzip.Name(): Gzip, + Bzip2.Name(): Bzip2, + Xz.Name(): Xz, + Zstd.Name(): Zstd, + ZstdChunked.Name(): ZstdChunked, + } +) + +// AlgorithmByName returns the compressor by its name +func AlgorithmByName(name string) (Algorithm, error) { + algorithm, ok := compressionAlgorithms[name] + if ok { + return algorithm, nil + } + return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) +} + +// DecompressorFunc returns the decompressed stream, given a compressed stream. 
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+	r, err := xz.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// CompressStream returns a writer that compresses data written to it into dest, using algo (at the given level, if non-nil).
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	m := map[string]string{}
+	return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata is CompressStream, except that any metadata
+// generated by the compression is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, or an invalid
+// value and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+ return Algorithm{}, nil, nil, err + } + + var retAlgo Algorithm + var decompressor DecompressorFunc + for _, algo := range compressionAlgorithms { + prefix := internal.AlgorithmPrefix(algo) + if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) { + logrus.Debugf("Detected compression format %s", algo.Name()) + retAlgo = algo + decompressor = internal.AlgorithmDecompressor(algo) + break + } + } + if decompressor == nil { + logrus.Debugf("No compression detected") + } + + return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil +} + +// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. +func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { + _, d, r, e := DetectCompressionFormat(input) + return d, r, e +} + +// AutoDecompress takes a stream and returns an uncompressed version of the +// same stream. +// The caller must call Close() on the returned stream (even if the input does not need, +// or does not even support, closing!). +func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { + decompressor, stream, err := DetectCompression(stream) + if err != nil { + return nil, false, errors.Wrapf(err, "detecting compression") + } + var res io.ReadCloser + if decompressor != nil { + res, err = decompressor(stream) + if err != nil { + return nil, false, errors.Wrapf(err, "initializing decompression") + } + } else { + res = ioutil.NopCloser(stream) + } + return res, decompressor != nil, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go new file mode 100644 index 00000000000..fb37ca31790 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -0,0 +1,65 @@ +package internal + +import "io" + +// CompressorFunc writes the compressed stream to the given writer using the specified compression level. +// The caller must call Close() on the stream (even if the input stream does not need closing!). +type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc func(io.Reader) (io.ReadCloser, error) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm struct { + name string + mime string + prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. + decompressor DecompressorFunc + compressor CompressorFunc +} + +// NewAlgorithm creates an Algorithm instance. +// This function exists so that Algorithm instances can only be created by code that +// is allowed to import this internal subpackage. +func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { + return Algorithm{ + name: name, + mime: mime, + prefix: prefix, + decompressor: decompressor, + compressor: compressor, + } +} + +// Name returns the name for the compression algorithm. +func (c Algorithm) Name() string { + return c.name +} + +// InternalUnstableUndocumentedMIMEQuestionMark ??? 
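AutoDecompress above is the usual entry point for callers that just want plain bytes regardless of how a blob was compressed; a minimal sketch (hypothetical input file, canonical github.com import path):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	f, err := os.Open("blob.bin") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// AutoDecompress sniffs the magic bytes and transparently decompresses
	// gzip/bzip2/xz/zstd; unrecognized data is passed through unchanged.
	rc, wasCompressed, err := compression.AutoDecompress(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	n, _ := io.Copy(io.Discard, rc)
	fmt.Printf("read %d uncompressed bytes (was compressed: %v)\n", n, wasCompressed)
}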
+// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
+func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
+	return c.mime
+}
+
+// AlgorithmCompressor returns the compressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmCompressor(algo Algorithm) CompressorFunc {
+	return algo.compressor
+}
+
+// AlgorithmDecompressor returns the decompressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
+	return algo.decompressor
+}
+
+// AlgorithmPrefix returns the prefix field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmPrefix(algo Algorithm) []byte {
+	return algo.prefix
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
new file mode 100644
index 00000000000..43d03b601c4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -0,0 +1,41 @@
+package types
+
+import (
+	"github.com/containers/image/v5/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
+
+const (
+	// GzipAlgorithmName is the name used by pkg/compression.Gzip.
+	// NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	GzipAlgorithmName = "gzip"
+	// Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+	// NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	Bzip2AlgorithmName = "bzip2"
+	// XzAlgorithmName is the name used by pkg/compression.Xz.
+	// NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	XzAlgorithmName = "Xz"
+	// ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+	// NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	ZstdAlgorithmName = "zstd"
+	// ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+	// NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked" +) diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go new file mode 100644 index 00000000000..39ae014d2e3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. +func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. +func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go new file mode 100644 index 00000000000..1d73dc405e7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -0,0 +1,802 @@ +package config + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + helperclient "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerAuthConfig struct { + Auth string `json:"auth,omitempty"` + IdentityToken string `json:"identitytoken,omitempty"` +} + +type dockerConfigFile struct { + AuthConfigs map[string]dockerAuthConfig `json:"auths"` + CredHelpers map[string]string `json:"credHelpers,omitempty"` +} + +type authPath struct { + path string + legacyFormat bool +} + +var ( + defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") + xdgConfigHomePath = filepath.FromSlash("containers/auth.json") + xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") + dockerHomePath = filepath.FromSlash(".docker/config.json") + dockerLegacyHomePath = ".dockercfg" + nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json") + + // ErrNotLoggedIn is returned for users not 
logged into a registry
+ // that they are trying to log out of
+ ErrNotLoggedIn = errors.New("not logged in")
+ // ErrNotSupported is returned for unsupported methods
+ ErrNotSupported = errors.New("not supported")
+)
+
+// SetCredentials stores the username and password in a location
+// appropriate for sys and the users’ configuration.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+// Returns a human-readable description of the location that was updated.
+// NOTE: The return value is only intended to be read by humans; its form is not an API,
+// it may change (or new forms can be added) any time.
+func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
+ isNamespaced, err := validateKey(key)
+ if err != nil {
+ return "", err
+ }
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return "", err
+ }
+
+ // Make sure to collect all errors.
+ var multiErr error
+ for _, helper := range helpers {
+ var desc string
+ var err error
+ switch helper {
+ // Special-case the built-in helpers for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+ if ch, exists := auths.CredHelpers[key]; exists {
+ if isNamespaced {
+ return false, unsupportedNamespaceErr(ch)
+ }
+ return false, setAuthToCredHelper(ch, key, username, password)
+ }
+ creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+ newCreds := dockerAuthConfig{Auth: creds}
+ auths.AuthConfigs[key] = newCreds
+ return true, nil
+ })
+ // External helpers.
+ default:
+ if isNamespaced {
+ err = unsupportedNamespaceErr(helper)
+ } else {
+ desc = fmt.Sprintf("credential helper: %s", helper)
+ err = setAuthToCredHelper(helper, key, username, password)
+ }
+ }
+ if err != nil {
+ multiErr = multierror.Append(multiErr, err)
+ logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
+ continue
+ }
+ logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
+ return desc, nil
+ }
+ return "", multiErr
+}
+
+func unsupportedNamespaceErr(helper string) error {
+ return errors.Errorf("namespaced key is not supported for credential helper %s", helper)
+}
+
+// SetAuthentication stores the username and password in the credential helper or file.
+// See the documentation of SetCredentials for the format of "key".
+func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
+ _, err := SetCredentials(sys, key, username, password)
+ return err
+}
+
+// GetAllCredentials returns the registry credentials for all registries stored
+// in any of the configured credential helpers.
+func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
+ // To keep things simple, let's first extract all registries from all
+ // possible sources, and then call `GetCredentials` on them. That
+ // prevents us from having to reverse engineer the logic in
+ // `GetCredentials`.
+ allKeys := make(map[string]bool)
+ addKey := func(s string) {
+ allKeys[s] = true
+ }
+
+ // To use GetCredentials, we must at least convert the URL forms into host names.
+ // While we're at it, we’ll also canonicalize docker.io to the standard format.
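+ // (Editor's note, hedged: normalizeRegistry, defined near the bottom of this
+ // file, maps both "docker.io" and "registry-1.docker.io" to the canonical
+ // auth-file key "index.docker.io"; the loop below folds that key back to
+ // "docker.io" for the returned map.)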
+ normalizedDockerIORegistry := normalizeRegistry("docker.io")
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return nil, err
+ }
+ for _, helper := range helpers {
+ switch helper {
+ // Special-case the built-in helper for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ for _, path := range getAuthFilePaths(sys, homedir.Get()) {
+ // readJSONFile returns an empty map in case the path doesn't exist.
+ auths, err := readJSONFile(path.path, path.legacyFormat)
+ if err != nil {
+ return nil, errors.Wrapf(err, "reading JSON file %q", path.path)
+ }
+ // Credential helpers in the auth file have a
+ // direct mapping to a registry, so we can just
+ // walk the map.
+ for registry := range auths.CredHelpers {
+ addKey(registry)
+ }
+ for key := range auths.AuthConfigs {
+ key := normalizeAuthFileKey(key, path.legacyFormat)
+ if key == normalizedDockerIORegistry {
+ key = "docker.io"
+ }
+ addKey(key)
+ }
+ }
+ // External helpers.
+ default:
+ creds, err := listAuthsFromCredHelper(helper)
+ if err != nil {
+ logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
+ }
+ switch errors.Cause(err) {
+ case nil:
+ for registry := range creds {
+ addKey(registry)
+ }
+ case exec.ErrNotFound:
+ // It's okay if the helper doesn't exist.
+ default:
+ return nil, err
+ }
+ }
+ }
+
+ // Now use `GetCredentials` to get the specific auth configs for each
+ // previously listed registry.
+ authConfigs := make(map[string]types.DockerAuthConfig)
+ for key := range allKeys {
+ authConf, err := GetCredentials(sys, key)
+ if err != nil {
+ // Note: we rely on the logging in `GetCredentials`.
+ return nil, err
+ }
+ if authConf != (types.DockerAuthConfig{}) {
+ authConfigs[key] = authConf
+ }
+ }
+
+ return authConfigs, nil
+}
+
+// getAuthFilePaths returns a slice of authPaths based on the system context
+// in the order they should be searched. Note that some paths may not exist.
+// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
+// by tests.
+func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
+ paths := []authPath{}
+ pathToAuth, lf, err := getPathToAuth(sys)
+ if err == nil {
+ paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
+ } else {
+ // An error means that the path set for XDG_RUNTIME_DIR does not exist,
+ // but we don't want to completely fail in the case that the user is pulling a public image.
+ // We log the error as a warning instead and move on to pulling the image.
+ logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+ }
+ xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
+ if xdgCfgHome == "" {
+ xdgCfgHome = filepath.Join(homeDir, ".config")
+ }
+ paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
+ if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+ paths = append(paths,
+ authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
+ )
+ } else {
+ paths = append(paths,
+ authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
+ )
+ }
+ paths = append(paths,
+ authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
+ )
+ return paths
+}
+
+// GetCredentials returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
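+// A hedged editor's sketch (not upstream documentation): because "not found"
+// is reported as a zero value rather than an error, callers usually test for
+// the empty struct:
+//
+//	auth, err := config.GetCredentials(sys, "quay.io/ns/repo")
+//	if err != nil {
+//		return err
+//	}
+//	if auth == (types.DockerAuthConfig{}) {
+//		// no stored credentials; proceed anonymously
+//	}
+//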
+// A valid key is a repository, a namespace within a registry, or a registry hostname. +// +// GetCredentialsForRef should almost always be used in favor of this API. +func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, key, homedir.Get()) +} + +// GetCredentialsForRef returns the registry credentials necessary for +// accessing ref on the registry ref points to, +// appropriate for sys and the users’ configuration. +// If an entry is not found, an empty struct is returned. +func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get()) +} + +// getCredentialsWithHomeDir is an internal implementation detail of +// GetCredentialsForRef and GetCredentials. It exists only to allow testing it +// with an artificial home directory. +func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) { + _, err := validateKey(key) + if err != nil { + return types.DockerAuthConfig{}, err + } + + if sys != nil && sys.DockerAuthConfig != nil { + logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key) + return *sys.DockerAuthConfig, nil + } + + var registry string // We compute this once because it is used in several places. + if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 { + registry = key[:firstSlash] + } else { + registry = key + } + + // Anonymous function to query credentials from auth files. + getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { + for _, path := range getAuthFilePaths(sys, homeDir) { + authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat) + if err != nil { + return types.DockerAuthConfig{}, "", err + } + + if authConfig != (types.DockerAuthConfig{}) { + return authConfig, path.path, nil + } + } + return types.DockerAuthConfig{}, "", nil + } + + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return types.DockerAuthConfig{}, err + } + + var multiErr error + for _, helper := range helpers { + var ( + creds types.DockerAuthConfig + helperKey string + credHelperPath string + err error + ) + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + helperKey = key + creds, credHelperPath, err = getCredentialsFromAuthFiles() + // External helpers. + default: + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers, but a "registry" is a valid parent of "key". + helperKey = registry + creds, err = getAuthFromCredHelper(helper, registry) + } + if err != nil { + logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) + multiErr = multierror.Append(multiErr, err) + continue + } + if creds != (types.DockerAuthConfig{}) { + msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper) + if credHelperPath != "" { + msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + } + logrus.Debug(msg) + return creds, nil + } + } + if multiErr != nil { + return types.DockerAuthConfig{}, multiErr + } + + logrus.Debugf("No credentials for %s found", key) + return types.DockerAuthConfig{}, nil +} + +// GetAuthentication returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. 
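+// (Editor's note, a hedged illustration: for a namespaced key, the auth-file
+// lookup in findCredentialsInFile walks from the most specific entry to the
+// bare registry, e.g. "quay.io/ns/repo" consults "quay.io/ns/repo",
+// "quay.io/ns", then "quay.io"; external credential helpers are only ever
+// queried with the registry part.)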
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
+//
+// Deprecated: This API only supports username and password. To get support
+// for OAuth2 in container registry authentication, we added the new
+// GetCredentialsForRef and GetCredentials APIs. The new APIs should be used; this one is kept to
+// maintain backward compatibility.
+func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
+ return getAuthenticationWithHomeDir(sys, key, homedir.Get())
+}
+
+// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
+// it exists only to allow testing it with an artificial home directory.
+func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
+ auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
+ if err != nil {
+ return "", "", err
+ }
+ if auth.IdentityToken != "" {
+ return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it")
+ }
+ return auth.Username, auth.Password, nil
+}
+
+// RemoveAuthentication removes credentials for `key` from all possible
+// sources such as credential helpers and auth files.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+func RemoveAuthentication(sys *types.SystemContext, key string) error {
+ isNamespaced, err := validateKey(key)
+ if err != nil {
+ return err
+ }
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return err
+ }
+
+ var multiErr error
+ isLoggedIn := false
+
+ removeFromCredHelper := func(helper string) {
+ if isNamespaced {
+ logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
+ return
+ }
+ err := deleteAuthFromCredHelper(helper, key)
+ if err == nil {
+ logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
+ isLoggedIn = true
+ return
+ }
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
+ return
+ }
+ // Note: wrap the helper's own error, not the (nil) outer one.
+ multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper))
+ }
+
+ for _, helper := range helpers {
+ var err error
+ switch helper {
+ // Special-case the built-in helper for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+ if innerHelper, exists := auths.CredHelpers[key]; exists {
+ removeFromCredHelper(innerHelper)
+ }
+ if _, ok := auths.AuthConfigs[key]; ok {
+ isLoggedIn = true
+ delete(auths.AuthConfigs, key)
+ }
+ return true, multiErr
+ })
+ if err != nil {
+ multiErr = multierror.Append(multiErr, err)
+ }
+ // External helpers.
+ default:
+ removeFromCredHelper(helper)
+ }
+ }
+
+ if multiErr != nil {
+ return multiErr
+ }
+ if !isLoggedIn {
+ return ErrNotLoggedIn
+ }
+
+ return nil
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in credential
+// helpers and auth files.
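+// A hedged editor's sketch of the two logout flows built on the removal
+// functions in this file (illustrative only):
+//
+//	// logout of a single registry:
+//	if err := config.RemoveAuthentication(sys, "registry.example.com"); err == config.ErrNotLoggedIn {
+//		// nothing was stored for that key
+//	}
+//	// logout --all:
+//	err := config.RemoveAllAuthentication(sys)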
+func RemoveAllAuthentication(sys *types.SystemContext) error { + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return err + } + + var multiErr error + for _, helper := range helpers { + var err error + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + for registry, helper := range auths.CredHelpers { + // Helpers in auth files are expected + // to exist, so no special treatment + // for them. + if err := deleteAuthFromCredHelper(helper, registry); err != nil { + return false, err + } + } + auths.CredHelpers = make(map[string]string) + auths.AuthConfigs = make(map[string]dockerAuthConfig) + return true, nil + }) + // External helpers. + default: + var creds map[string]string + creds, err = listAuthsFromCredHelper(helper) + switch errors.Cause(err) { + case nil: + for registry := range creds { + err = deleteAuthFromCredHelper(helper, registry) + if err != nil { + break + } + } + case exec.ErrNotFound: + // It's okay if the helper doesn't exist. + continue + default: + // fall through + } + } + if err != nil { + logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err) + multiErr = multierror.Append(multiErr, err) + continue + } + logrus.Debugf("All credentials removed from credential helper %s", helper) + } + + return multiErr +} + +func listAuthsFromCredHelper(credHelper string) (map[string]string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.List(p) +} + +// getPathToAuth gets the path of the auth.json file used for reading and writing credentials +// returns the path, and a bool specifies whether the file is in legacy format +func getPathToAuth(sys *types.SystemContext) (string, bool, error) { + return getPathToAuthWithOS(sys, runtime.GOOS) +} + +// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, +// it exists only to allow testing it with an artificial runtime.GOOS. +func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) { + if sys != nil { + if sys.AuthFilePath != "" { + return sys.AuthFilePath, false, nil + } + if sys.LegacyFormatAuthFilePath != "" { + return sys.LegacyFormatAuthFilePath, true, nil + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil + } + } + if goOS == "windows" || goOS == "darwin" { + return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil + } + + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir != "" { + // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. + // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. + _, err := os.Stat(runtimeDir) + if os.IsNotExist(err) { + // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory + // or made a typo while setting the environment variable, + // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. + return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. 
Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+ } // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
+ return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
+ }
+ return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
+}
+
+// readJSONFile unmarshals the authentications stored in the auth.json file and returns them,
+// or returns an empty dockerConfigFile data structure if auth.json does not exist.
+// If the file exists and is empty, readJSONFile returns an error.
+func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
+ var auths dockerConfigFile
+
+ raw, err := ioutil.ReadFile(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ auths.AuthConfigs = map[string]dockerAuthConfig{}
+ return auths, nil
+ }
+ return dockerConfigFile{}, err
+ }
+
+ if legacyFormat {
+ if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
+ return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
+ }
+ return auths, nil
+ }
+
+ if err = json.Unmarshal(raw, &auths); err != nil {
+ return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
+ }
+
+ if auths.AuthConfigs == nil {
+ auths.AuthConfigs = map[string]dockerAuthConfig{}
+ }
+ if auths.CredHelpers == nil {
+ auths.CredHelpers = make(map[string]string)
+ }
+
+ return auths, nil
+}
+
+// modifyJSON finds an auth.json file, calls editor on the contents, and
+// writes it back if editor returns true.
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) {
+ path, legacyFormat, err := getPathToAuth(sys)
+ if err != nil {
+ return "", err
+ }
+ if legacyFormat {
+ return "", fmt.Errorf("writes to %s using legacy format are not supported", path)
+ }
+
+ dir := filepath.Dir(path)
+ if err = os.MkdirAll(dir, 0700); err != nil {
+ return "", err
+ }
+
+ auths, err := readJSONFile(path, false)
+ if err != nil {
+ return "", errors.Wrapf(err, "reading JSON file %q", path)
+ }
+
+ updated, err := editor(&auths)
+ if err != nil {
+ return "", errors.Wrapf(err, "updating %q", path)
+ }
+ if updated {
+ newData, err := json.MarshalIndent(auths, "", "\t")
+ if err != nil {
+ return "", errors.Wrapf(err, "marshaling JSON %q", path)
+ }
+
+ if err = ioutil.WriteFile(path, newData, 0600); err != nil {
+ return "", errors.Wrapf(err, "writing to file %q", path)
+ }
+ }
+
+ return path, nil
+}
+
+func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ creds, err := helperclient.Get(p, registry)
+ if err != nil {
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
+ err = nil
+ }
+ return types.DockerAuthConfig{}, err
+ }
+
+ switch creds.Username {
+ case "":
+ return types.DockerAuthConfig{
+ IdentityToken: creds.Secret,
+ }, nil
+ default:
+ return types.DockerAuthConfig{
+ Username: creds.Username,
+ Password: creds.Secret,
+ }, nil
+ }
+}
+
+func setAuthToCredHelper(credHelper, registry, username, password string) error {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ creds := &credentials.Credentials{
+ ServerURL: registry,
+ Username: username,
+ Secret: password,
+ }
+ return
helperclient.Store(p, creds) +} + +func deleteAuthFromCredHelper(credHelper, registry string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.Erase(p, registry) +} + +// findCredentialsInFile looks for credentials matching "key" +// (which is "registry" or a namespace in "registry") in "path". +func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { + auths, err := readJSONFile(path, legacyFormat) + if err != nil { + return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path) + } + + // First try cred helpers. They should always be normalized. + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers. + if ch, exists := auths.CredHelpers[registry]; exists { + logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path) + return getAuthFromCredHelper(ch, registry) + } + + // Support sub-registry namespaces in auth. + // (This is not a feature of ~/.docker/config.json; we support it even for + // those files as an extension.) + var keys []string + if !legacyFormat { + keys = authKeysForKey(key) + } else { + keys = []string{registry} + } + + // Repo or namespace keys are only supported as exact matches. For registry + // keys we prefer exact matches as well. + for _, key := range keys { + if val, exists := auths.AuthConfigs[key]; exists { + return decodeDockerAuth(val) + } + } + + // bad luck; let's normalize the entries first + // This primarily happens for legacyFormat, which for a time used API URLs + // (http[s:]//…/v1/) as keys. + // Secondarily, (docker login) accepted URLs with no normalization for + // several years, and matched registry hostnames against that, so support + // those entries even in non-legacyFormat ~/.docker/config.json. + // The docker.io registry still uses the /v1/ key with a special host name, + // so account for that as well. + registry = normalizeRegistry(registry) + for k, v := range auths.AuthConfigs { + if normalizeAuthFileKey(k, legacyFormat) == registry { + return decodeDockerAuth(v) + } + } + + // Only log this if we found nothing; getCredentialsWithHomeDir logs the + // source of found data. + logrus.Debugf("No credentials matching %s found in %s", key, path) + return types.DockerAuthConfig{}, nil +} + +// authKeysForKey returns the keys matching a provided auth file key, in order +// from the best match to worst. For example, +// when given a repository key "quay.io/repo/ns/image", it returns +// - quay.io/repo/ns/image +// - quay.io/repo/ns +// - quay.io/repo +// - quay.io +func authKeysForKey(key string) (res []string) { + for { + res = append(res, key) + + lastSlash := strings.LastIndex(key, "/") + if lastSlash == -1 { + break + } + key = key[:lastSlash] + } + + return res +} + +// decodeDockerAuth decodes the username and password, which is +// encoded in base64. 
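+// For example (editor's note), an auths entry of
+//
+//	"auth": "dXNlcjpwYXNzd29yZA=="
+//
+// is base64("user:password") and decodes to Username "user" and Password
+// "password"; trailing NUL bytes in the password are trimmed below.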
+func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) { + decoded, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + return types.DockerAuthConfig{}, err + } + + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 { + // if it's invalid just skip, as docker does + return types.DockerAuthConfig{}, nil + } + + user := parts[0] + password := strings.Trim(parts[1], "\x00") + return types.DockerAuthConfig{ + Username: user, + Password: password, + IdentityToken: conf.IdentityToken, + }, nil +} + +// normalizeAuthFileKey takes a key, converts it to a host name and normalizes +// the resulting registry. +func normalizeAuthFileKey(key string, legacyFormat bool) string { + stripped := strings.TrimPrefix(key, "http://") + stripped = strings.TrimPrefix(stripped, "https://") + + if legacyFormat || stripped != key { + stripped = strings.SplitN(stripped, "/", 2)[0] + } + + return normalizeRegistry(stripped) +} + +// normalizeRegistry converts the provided registry if a known docker.io host +// is provided. +func normalizeRegistry(registry string) string { + switch registry { + case "registry-1.docker.io", "docker.io": + return "index.docker.io" + } + return registry +} + +// validateKey verifies that the input key does not have a prefix that is not +// allowed and returns an indicator if the key is namespaced. +func validateKey(key string) (bool, error) { + if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") { + return false, errors.Errorf("key %s contains http[s]:// prefix", key) + } + + // Ideally this should only accept explicitly valid keys, compare + // validateIdentityRemappingPrefix. For now, just reject values that look + // like tagged or digested values. + if strings.ContainsRune(key, '@') { + return false, fmt.Errorf(`key %s contains a '@' character`, key) + } + + firstSlash := strings.IndexRune(key, '/') + isNamespaced := firstSlash != -1 + // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo. + if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') { + return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key) + } + // check if the provided key contains one or more subpaths. + return isNamespaced, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go new file mode 100644 index 00000000000..46c10ff631a --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -0,0 +1,477 @@ +package shortnames + +import ( + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/manifoldco/promptui" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/term" +) + +// IsShortName returns true if the specified input is a "short name". A "short +// name" refers to a container image without a fully-qualified reference, and +// is hence missing a registry (or domain). Names including a digest are not +// short names. +// +// Examples: +// * short names: "image:tag", "library/fedora" +// * not short names: "quay.io/image", "localhost/image:tag", +// "server.org:5000/lib/image", "image@sha256:..." 
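+// A hedged editor's sketch of the resulting behavior:
+//
+//	shortnames.IsShortName("fedora:latest")         // true
+//	shortnames.IsShortName("library/fedora")        // true
+//	shortnames.IsShortName("quay.io/fedora:latest") // false (has a registry)
+//	shortnames.IsShortName("localhost/fedora")      // false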
+func IsShortName(input string) bool {
+ isShort, _, _ := parseUnnormalizedShortName(input)
+ return isShort
+}
+
+// parseUnnormalizedShortName parses the input and returns whether it's a short name,
+// the unnormalized reference.Named, and a parsing error.
+func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
+ ref, err := reference.Parse(input)
+ if err != nil {
+ return false, nil, errors.Wrapf(err, "cannot parse input: %q", input)
+ }
+
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return true, nil, errors.Errorf("%q is not a named reference", input)
+ }
+
+ registry := reference.Domain(named)
+ if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+ // A final parse to make sure that docker.io references are correctly
+ // normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+ named, err = reference.ParseNormalizedNamed(input)
+ if err != nil {
+ return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input)
+ }
+ return false, named, nil
+ }
+
+ return true, named, nil
+}
+
+// splitUserInput parses the user-specified reference. Namely, it strips off
+// the tag or digest and stores it in the return values so that both can be
+// re-added to a resolved alias or to USR candidates at a later point.
+func splitUserInput(named reference.Named) (isTagged bool, isDigested bool, normalized reference.Named, tag string, digest digest.Digest) {
+ normalized = named
+
+ tagged, isT := named.(reference.NamedTagged)
+ if isT {
+ isTagged = true
+ tag = tagged.Tag()
+ }
+
+ digested, isD := named.(reference.Digested)
+ if isD {
+ isDigested = true
+ digest = digested.Digest()
+ }
+
+ // Strip off tag/digest if present.
+ normalized = reference.TrimNamed(named)
+
+ return
+}
+
+// Add records the specified name-value pair as a new short-name alias to the
+// user-specific aliases.conf. It may override an existing alias for `name`.
+func Add(ctx *types.SystemContext, name string, value reference.Named) error {
+ isShort, _, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return err
+ }
+ if !isShort {
+ return errors.Errorf("%q is not a short name", name)
+ }
+ return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
+}
+
+// Remove clears the short-name alias for the specified name. It returns an
+// error in case name does not exist in the machine-generated
+// short-name-alias.conf. In such case, the alias must be specified in one of
+// the registries.conf files, which is the users' responsibility.
+func Remove(ctx *types.SystemContext, name string) error {
+ isShort, _, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return err
+ }
+ if !isShort {
+ return errors.Errorf("%q is not a short name", name)
+ }
+ return sysregistriesv2.RemoveShortNameAlias(ctx, name)
+}
+
+// Resolved encapsulates all data for a resolved image name.
+type Resolved struct {
+ PullCandidates []PullCandidate
+
+ userInput reference.Named
+ systemContext *types.SystemContext
+ rationale rationale
+ originDescription string
+}
+
+func (r *Resolved) addCandidate(named reference.Named) {
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
+ r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r})
+}
+
+func (r *Resolved) addCandidateToRecord(named reference.Named) {
+ r.PullCandidates = append(r.PullCandidates, PullCandidate{named, true, r})
+}
+
+// rationale allows reasoning about pull errors and adding some context information.
+// Used in (*Resolved).Description.
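+// (Editor's note: the rationale is attached to each Resolved so that
+// Description below can explain to the user *why* the candidate list looks
+// the way it does, e.g. an alias hit vs. unqualified-search expansion.)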
+type rationale int
+
+const (
+ // No additional context.
+ rationaleNone rationale = iota
+ // Resolved value is a short-name alias.
+ rationaleAlias
+ // Resolved value has been completed with an Unqualified Search Registry.
+ rationaleUSR
+ // Resolved value has been selected by the user (via the prompt).
+ rationaleUserSelection
+ // Resolved value has been enforced to use Docker Hub (via SystemContext).
+ rationaleEnforcedDockerHub
+)
+
+// Description returns a human-readable description about the resolution
+// process (e.g., short-name alias, unqualified-search registries, etc.).
+// It is meant to be printed before attempting to pull the pull candidates
+// to make the short-name resolution more transparent to the user.
+//
+// If the returned string is empty, it is not meant to be printed.
+func (r *Resolved) Description() string {
+ switch r.rationale {
+ case rationaleAlias:
+ return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
+ case rationaleUSR:
+ return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
+ case rationaleEnforcedDockerHub:
+ return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription)
+ case rationaleUserSelection, rationaleNone:
+ fallthrough
+ default:
+ return ""
+ }
+}
+
+// FormatPullErrors is a convenience function to format errors that occurred
+// while trying to pull all of the resolved pull candidates.
+//
+// Note that nil is returned if len(pullErrors) == 0. Otherwise, the number of
+// pull errors must equal the number of pull candidates.
+func (r *Resolved) FormatPullErrors(pullErrors []error) error {
+ if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) {
+ pullErrors = append(pullErrors,
+ errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+ len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
+ }
+
+ switch len(pullErrors) {
+ case 0:
+ return nil
+ case 1:
+ return pullErrors[0]
+ default:
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("%d errors occurred while pulling:", len(pullErrors)))
+ for _, e := range pullErrors {
+ sb.WriteString("\n * ")
+ sb.WriteString(e.Error())
+ }
+ return errors.New(sb.String())
+ }
+}
+
+// PullCandidate is a resolved name. Once the Value has been used
+// successfully, users MUST call `(*PullCandidate).Record(..)` to possibly
+// record it as a new short-name alias.
+type PullCandidate struct {
+ // Fully-qualified reference with tag or digest.
+ Value reference.Named
+ // Control whether to record it permanently as an alias.
+ record bool
+
+ // Backwards pointer to the Resolved "parent".
+ resolved *Resolved
+}
+
+// Record may store a short-name alias for the PullCandidate.
+func (c *PullCandidate) Record() error {
+ if !c.record {
+ return nil
+ }
+
+ // Strip off tags/digests from name/value.
+ name := reference.TrimNamed(c.resolved.userInput)
+ value := reference.TrimNamed(c.Value)
+
+ if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
+ return errors.Wrapf(err, "recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+ }
+ return nil
+}
+
+// Resolve resolves the specified name to either one or more fully-qualified
+// image references that the short name may be *pulled* from. If the specified
+// name is already a fully-qualified reference (i.e., not a short name), it is
+// returned as is.
In case it's a short name, it's resolved according to the
+// ShortNameMode in the SystemContext (if specified) or in the registries.conf.
+//
+// Note that tags and digests are stripped from the specified name before
+// looking up an alias. Stripped off tags and digests are later on appended to
+// all candidates. If neither tag nor digest is specified, candidates are
+// normalized with the "latest" tag. An error is returned if there is no
+// matching alias and no unqualified-search registries are configured.
+//
+// Note that callers *must* call `(PullCandidate).Record` after a returned
+// item has been pulled successfully; this callback will record a new
+// short-name alias (depending on the specified short-name mode).
+//
+// Furthermore, before attempting to pull callers *should* call
+// `(Resolved).Description` and afterwards use
+// `(Resolved).FormatPullErrors` in case of pull errors.
+func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
+ resolved := &Resolved{}
+
+ // Create a copy of the system context to make it usable beyond this
+ // function call.
+ if ctx != nil {
+ copy := *ctx
+ ctx = &copy
+ }
+ resolved.systemContext = ctx
+
+ // Detect which mode we're running in.
+ mode, err := sysregistriesv2.GetShortNameMode(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Sanity check the short-name mode.
+ switch mode {
+ case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
+ // We're good.
+ default:
+ return nil, errors.Errorf("unsupported short-name mode (%v)", mode)
+ }
+
+ isShort, shortRef, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return nil, err
+ }
+ if !isShort { // no short name
+ resolved.addCandidate(shortRef)
+ return resolved, nil
+ }
+
+ // Resolve to docker.io only if enforced by the caller (e.g., Podman's
+ // Docker-compatible REST API).
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot normalize input: %q", name)
+ }
+ resolved.addCandidate(named)
+ resolved.rationale = rationaleEnforcedDockerHub
+ resolved.originDescription = "enforced by caller"
+ return resolved, nil
+ }
+
+ // Strip off the tag to normalize the short name for looking it up in
+ // the config files.
+ isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
+ resolved.userInput = shortNameRepo
+
+ // If there's already an alias, use it.
+ namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
+ if err != nil {
+ return nil, err
+ }
+
+ // Always use an alias if present.
+ if namedAlias != nil {
+ if isTagged {
+ namedAlias, err = reference.WithTag(namedAlias, tag)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if isDigested {
+ namedAlias, err = reference.WithDigest(namedAlias, digest)
+ if err != nil {
+ return nil, err
+ }
+ }
+ resolved.addCandidate(namedAlias)
+ resolved.rationale = rationaleAlias
+ resolved.originDescription = aliasOriginDescription
+ return resolved, nil
+ }
+
+ resolved.rationale = rationaleUSR
+
+ // Query the registry for unqualified-search registries.
+ unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // Error out if there's no matching alias and no search registries.
+ if len(unqualifiedSearchRegistries) == 0 { + if usrConfig != "" { + return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig) + } + return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name) + } + resolved.originDescription = usrConfig + + for _, reg := range unqualifiedSearchRegistries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + resolved.addCandidate(named) + } + + // If we're running in disabled, return the candidates without + // prompting (and without recording). + if mode == types.ShortNameModeDisabled { + return resolved, nil + } + + // If we have only one candidate, there's no ambiguity. + if len(resolved.PullCandidates) == 1 { + return resolved, nil + } + + // If we don't have a TTY, act according to the mode. + if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stdin.Fd())) { + switch mode { + case types.ShortNameModePermissive: + // Permissive falls back to using all candidates. + return resolved, nil + case types.ShortNameModeEnforcing: + // Enforcing errors out without a prompt. + return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY") + default: + // We should not end up here. + return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode) + } + } + + // We have a TTY, and can prompt the user with a selection of all + // possible candidates. + strCandidates := []string{} + for _, candidate := range resolved.PullCandidates { + strCandidates = append(strCandidates, candidate.Value.String()) + } + prompt := promptui.Select{ + Label: "Please select an image", + Items: strCandidates, + HideHelp: true, // do not show navigation help + } + + _, selection, err := prompt.Run() + if err != nil { + return nil, err + } + + named, err := reference.ParseNormalizedNamed(selection) + if err != nil { + return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection) + } + + resolved.PullCandidates = nil + resolved.addCandidateToRecord(named) + resolved.rationale = rationaleUserSelection + + return resolved, nil +} + +// ResolveLocally resolves the specified name to either one or more local +// images. If the specified name is already a fully-qualified reference (i.e., +// not a short name), it is returned as is. In case, it's a short name, the +// returned slice of named references looks as follows: +// +// 1) If present, the short-name alias +// 2) "localhost/" as used by many container engines such as Podman and Buildah +// 3) Unqualified-search registries from the registries.conf files +// +// Note that tags and digests are stripped from the specified name before +// looking up an alias. Stripped off tags and digests are later on appended to +// all candidates. If neither tag nor digest is specified, candidates are +// normalized with the "latest" tag. The returned slice contains at least one +// item. 
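+// A hedged editor's sketch with illustrative values: given an alias
+// "fedora" = "registry.fedoraproject.org/fedora" and the search registry
+// "registry.example.com", ResolveLocally(ctx, "fedora:36") returns
+//
+//	registry.fedoraproject.org/fedora:36
+//	localhost/fedora:36
+//	registry.example.com/fedora:36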
+func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, error) { + isShort, shortRef, err := parseUnnormalizedShortName(name) + if err != nil { + return nil, err + } + if !isShort { // no short name + named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed + return []reference.Named{named}, nil + } + + var candidates []reference.Named + + // Complete the candidates with the specified registries. + completeCandidates := func(registries []string) ([]reference.Named, error) { + for _, reg := range registries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + candidates = append(candidates, named) + } + return candidates, nil + } + + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + return completeCandidates([]string{"docker.io"}) + } + + // Strip off the tag to normalize the short name for looking it up in + // the config files. + isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef) + + // If there's already an alias, use it. + namedAlias, _, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String()) + if err != nil { + return nil, err + } + if namedAlias != nil { + if isTagged { + namedAlias, err = reference.WithTag(namedAlias, tag) + if err != nil { + return nil, err + } + } + if isDigested { + namedAlias, err = reference.WithDigest(namedAlias, digest) + if err != nil { + return nil, err + } + } + namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed + candidates = append(candidates, namedAlias) + } + + // Query the registry for unqualified-search registries. + unqualifiedSearchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(ctx) + if err != nil { + return nil, err + } + + // Note that "localhost" has precedence over the unqualified-search registries. + return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md new file mode 100644 index 00000000000..ae6097e82ed --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/strslice/README.md @@ -0,0 +1 @@ +This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go new file mode 100644 index 00000000000..bad493fb89f --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. 
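+ // (Editor's example, hedged: both `"entrypoint": "/bin/sh"` and
+ // `"entrypoint": ["/bin/sh", "-c"]` unmarshal into a StrSlice; the
+ // scalar form is wrapped into a one-element slice below.)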
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go new file mode 100644 index 00000000000..7122e869fe6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -0,0 +1,347 @@ +package sysregistriesv2 + +import ( + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/rootless" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/lockfile" + "github.com/pkg/errors" +) + +// defaultShortNameMode is the default mode of registries.conf files if the +// corresponding field is left empty. +const defaultShortNameMode = types.ShortNameModePermissive + +// userShortNamesFile is the user-specific config file to store aliases. +var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf") + +// shortNameAliasesConfPath returns the path to the machine-generated +// short-name-aliases.conf file. +func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) { + if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 { + return ctx.UserShortNameAliasConfPath, nil + } + + if rootless.GetRootlessEUID() == 0 { + // Root user or in a non-conforming user NS + return filepath.Join("/var/cache", userShortNamesFile), nil + } + + // Rootless user + cacheRoot, err := homedir.GetCacheHome() + if err != nil { + return "", err + } + + return filepath.Join(cacheRoot, userShortNamesFile), nil +} + +// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the +// software-maintained `userShortNamesFile`. +type shortNameAliasConf struct { + // A map for aliasing short names to their fully-qualified image + // reference counter parts. + // Note that Aliases is niled after being loaded from a file. + Aliases map[string]string `toml:"aliases"` + + // If you add any field, make sure to update nonempty() below. +} + +// nonempty returns true if config contains at least one configuration entry. +func (c *shortNameAliasConf) nonempty() bool { + copy := *c // A shallow copy + if copy.Aliases != nil && len(copy.Aliases) == 0 { + copy.Aliases = nil + } + return !reflect.DeepEqual(copy, shortNameAliasConf{}) +} + +// alias combines the parsed value of an alias with the config file it has been +// specified in. The config file is crucial for an improved user experience +// such that users are able to resolve potential pull errors. +type alias struct { + // The parsed value of an alias. May be nil if set to "" in a config. + value reference.Named + // The config file the alias originates from. + configOrigin string +} + +// shortNameAliasCache is the result of parsing shortNameAliasConf, +// pre-processed for faster usage. +type shortNameAliasCache struct { + // Note that an alias value may be nil iff it's set as an empty string + // in the config. + namedAliases map[string]alias +} + +// ResolveShortNameAlias performs an alias resolution of the specified name. +// The user-specific short-name-aliases.conf has precedence over aliases in the +// assembled registries.conf. 
It returns the possibly resolved alias or nil, a +// human-readable description of the config where the alias is specified, and +// an error. The origin of the config file is crucial for an improved user +// experience such that users are able to resolve potential pull errors. +// Almost all callers should use pkg/shortnames instead. +// +// Note that it’s the caller’s responsibility to pass only a repository +// (reference.IsNameOnly) as the short name. +func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) { + if err := validateShortName(name); err != nil { + return nil, "", err + } + confPath, lock, err := shortNameAliasesConfPathAndLock(ctx) + if err != nil { + return nil, "", err + } + + // Acquire the lock as a reader to allow for multiple routines in the + // same process space to read simultaneously. + lock.RLock() + defer lock.Unlock() + + _, aliasCache, err := loadShortNameAliasConf(confPath) + if err != nil { + return nil, "", err + } + + // First look up the short-name-aliases.conf. Note that a value may be + // nil iff it's set as an empty string in the config. + alias, resolved := aliasCache.namedAliases[name] + if resolved { + return alias.value, alias.configOrigin, nil + } + + config, err := getConfig(ctx) + if err != nil { + return nil, "", err + } + alias, resolved = config.aliasCache.namedAliases[name] + if resolved { + return alias.value, alias.configOrigin, nil + } + return nil, "", nil +} + +// editShortNameAlias loads the aliases.conf file and changes it. If value is +// set, it adds the name-value pair as a new alias. Otherwise, it will remove +// name from the config. +func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error { + if err := validateShortName(name); err != nil { + return err + } + if value != nil { + if _, err := parseShortNameValue(*value); err != nil { + return err + } + } + + confPath, lock, err := shortNameAliasesConfPathAndLock(ctx) + if err != nil { + return err + } + + // Acquire the lock as a writer to prevent data corruption. + lock.Lock() + defer lock.Unlock() + + // Load the short-name-alias.conf, add the specified name-value pair, + // and write it back to the file. + conf, _, err := loadShortNameAliasConf(confPath) + if err != nil { + return err + } + + if conf.Aliases == nil { // Ensure we have a map to update. + conf.Aliases = make(map[string]string) + } + if value != nil { + conf.Aliases[name] = *value + } else { + // If the name does not exist, throw an error. + if _, exists := conf.Aliases[name]; !exists { + return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath) + } + + delete(conf.Aliases, name) + } + + f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + + encoder := toml.NewEncoder(f) + return encoder.Encode(conf) +} + +// AddShortNameAlias adds the specified name-value pair as a new alias to the +// user-specific aliases.conf. It may override an existing alias for `name`. +// +// Note that it’s the caller’s responsibility to pass only a repository +// (reference.IsNameOnly) as the short name. +func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error { + return editShortNameAlias(ctx, name, &value) +} + +// RemoveShortNameAlias clears the alias for the specified name. It throws an +// error in case name does not exist in the machine-generated +// short-name-alias.conf. 
In such case, the alias must be specified in one of +// the registries.conf files, which is the users' responsibility. +// +// Note that it’s the caller’s responsibility to pass only a repository +// (reference.IsNameOnly) as the short name. +func RemoveShortNameAlias(ctx *types.SystemContext, name string) error { + return editShortNameAlias(ctx, name, nil) +} + +// parseShortNameValue parses the specified alias into a reference.Named. The alias is +// expected to not be tagged or carry a digest and *must* include a +// domain/registry. +// +// Note that the returned reference is always normalized. +func parseShortNameValue(alias string) (reference.Named, error) { + ref, err := reference.Parse(alias) + if err != nil { + return nil, errors.Wrapf(err, "parsing alias %q", alias) + } + + if _, ok := ref.(reference.Digested); ok { + return nil, errors.Errorf("invalid alias %q: must not contain digest", alias) + } + + if _, ok := ref.(reference.Tagged); ok { + return nil, errors.Errorf("invalid alias %q: must not contain tag", alias) + } + + named, ok := ref.(reference.Named) + if !ok { + return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias) + } + + registry := reference.Domain(named) + if !(strings.ContainsAny(registry, ".:") || registry == "localhost") { + return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias) + } + + // A final parse to make sure that docker.io references are correctly + // normalized (e.g., docker.io/alpine to docker.io/library/alpine. + named, err = reference.ParseNormalizedNamed(alias) + return named, err +} + +// validateShortName parses the specified `name` of an alias (i.e., the left-hand +// side) and checks if it's a short name and does not include a tag or digest. +func validateShortName(name string) error { + repo, err := reference.Parse(name) + if err != nil { + return errors.Wrapf(err, "cannot parse short name: %q", name) + } + + if _, ok := repo.(reference.Digested); ok { + return errors.Errorf("invalid short name %q: must not contain digest", name) + } + + if _, ok := repo.(reference.Tagged); ok { + return errors.Errorf("invalid short name %q: must not contain tag", name) + } + + named, ok := repo.(reference.Named) + if !ok { + return errors.Errorf("invalid short name %q: no name", name) + } + + registry := reference.Domain(named) + if strings.ContainsAny(registry, ".:") || registry == "localhost" { + return errors.Errorf("invalid short name %q: must not contain registry", name) + } + return nil +} + +// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal +// representation. +func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) { + res := shortNameAliasCache{ + namedAliases: make(map[string]alias), + } + errs := []error{} + for name, value := range conf.Aliases { + if err := validateShortName(name); err != nil { + errs = append(errs, err) + } + + // Empty right-hand side values in config files allow to reset + // an alias in a previously loaded config. This way, drop-in + // config files from registries.conf.d can reset potentially + // malconfigured aliases. + if value == "" { + res.namedAliases[name] = alias{nil, path} + continue + } + + named, err := parseShortNameValue(value) + if err != nil { + // We want to report *all* malformed entries to avoid a + // whack-a-mole for the user. 
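+ // (Editor's note, hedged: a valid right-hand side is a fully
+ // qualified repository such as "registry.example.com/foo/fedora";
+ // tagged or digested values are rejected by parseShortNameValue
+ // above, and an empty string resets the alias.)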
+ errs = append(errs, err) + } else { + res.namedAliases[name] = alias{named, path} + } + } + if len(errs) > 0 { + err := errs[0] + for i := 1; i < len(errs); i++ { + err = errors.Wrapf(err, "%v\n", errs[i]) + } + return nil, err + } + return &res, nil +} + +// updateWithConfigurationFrom updates c with configuration from updates. +// In case of conflict, updates is preferred. +func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) { + for name, value := range updates.namedAliases { + c.namedAliases[name] = value + } +} + +func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { + conf := shortNameAliasConf{} + + _, err := toml.DecodeFile(confPath, &conf) + if err != nil && !os.IsNotExist(err) { + // It's okay if the config doesn't exist. Other errors are not. + return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) + } + + // Even if we don’t always need the cache, doing so validates the machine-generated config. The + // file could still be corrupted by another process or user. + cache, err := newShortNameAliasCache(confPath, &conf) + if err != nil { + return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) + } + + return &conf, cache, nil +} + +func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) { + shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx) + if err != nil { + return "", nil, err + } + // Make sure the path to file exists. + if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil { + return "", nil, err + } + + lockPath := shortNameAliasesConfPath + ".lock" + locker, err := lockfile.GetLockfile(lockPath) + return shortNameAliasesConfPath, locker, err +} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go new file mode 100644 index 00000000000..c8a603c4ef0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -0,0 +1,1000 @@ +package sysregistriesv2 + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "sync" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// systemRegistriesConfPath is the path to the system-wide registry +// configuration file and is used to add/subtract potential registries for +// obtaining images. You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' +var systemRegistriesConfPath = builtinRegistriesConfPath + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/etc/containers/registries.conf" + +// systemRegistriesConfDirPath is the path to the system-wide registry +// configuration directory and is used to add/subtract potential registries for +// obtaining images. 
You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path'
+var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
+const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
+
+// AuthenticationFileHelper is a special key for credential helpers indicating
+// the usage of consulting containers-auth.json files instead of a credential
+// helper.
+const AuthenticationFileHelper = "containers-auth.json"
+
+// Endpoint describes a remote location of a registry.
+type Endpoint struct {
+	// The endpoint's remote location. Can be empty iff Prefix contains
+	// wildcard in the format: "*.example.com" for subdomain matching.
+	// Please refer to FindRegistry / PullSourcesFromReference instead
+	// of accessing/interpreting `Location` directly.
+	Location string `toml:"location,omitempty"`
+	// If true, certs verification will be skipped and HTTP (non-TLS)
+	// connections will be allowed.
+	Insecure bool `toml:"insecure,omitempty"`
+}
+
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
+
+// userRegistriesDir is the path to the per user registry configuration directory.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d")
+
+// rewriteReference substitutes the provided reference `prefix` with the
+// endpoint's `location` in `ref` and creates a new named reference from it.
+// The function errors if the newly created reference is not parsable.
+func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+	refString := ref.String()
+	var newNamedRef string
+	// refMatchingPrefix returns the length of the match. Everything that
+	// follows the match gets appended to the registry's location.
+	prefixLen := refMatchingPrefix(refString, prefix)
+	if prefixLen == -1 {
+		return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
+	}
+	// In the case of an empty `location` field, simply return the original
+	// input ref as-is.
+	//
+	// FIXME: already validated in postProcessRegistries, so check can probably
+	// be dropped.
+	// https://github.com/containers/image/pull/1191#discussion_r610621608
+	if e.Location == "" {
+		if !strings.HasPrefix(prefix, "*.") {
+			return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
+		}
+		return ref, nil
+	}
+	newNamedRef = e.Location + refString[prefixLen:]
+	newParsedRef, err := reference.ParseNamed(newNamedRef)
+	if err != nil {
+		return nil, errors.Wrapf(err, "rewriting reference")
+	}
+
+	return newParsedRef, nil
+}
+
+// Registry represents a registry.
+type Registry struct {
+	// Prefix is used for matching images, and to translate one namespace to
+	// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
+	// and we pull from "example.com/bar/myimage:latest", the image will
+	// effectively be pulled from "example.com/foo/bar/myimage:latest".
+	// If no Prefix is specified, it defaults to the specified location.
+	// Prefix can also be in the format: "*.example.com" for matching
+	// subdomains.
The wildcard should only be in the beginning and should also + // not contain any namespaces or special characters: "/", "@" or ":". + // Please refer to FindRegistry / PullSourcesFromReference instead + // of accessing/interpreting `Prefix` directly. + Prefix string `toml:"prefix"` + // A registry is an Endpoint too + Endpoint + // The registry's mirrors. + Mirrors []Endpoint `toml:"mirror,omitempty"` + // If true, pulling from the registry will be blocked. + Blocked bool `toml:"blocked,omitempty"` + // If true, mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. + MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` +} + +// PullSource consists of an Endpoint and a Reference. Note that the reference is +// rewritten according to the registries prefix and the Endpoint's location. +type PullSource struct { + Endpoint Endpoint + Reference reference.Named +} + +// PullSourcesFromReference returns a slice of PullSource's based on the passed +// reference. +func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { + var endpoints []Endpoint + + if r.MirrorByDigestOnly { + // Only use mirrors when the reference is a digest one. + if _, isDigested := ref.(reference.Canonical); isDigested { + endpoints = append(r.Mirrors, r.Endpoint) + } else { + endpoints = []Endpoint{r.Endpoint} + } + } else { + endpoints = append(r.Mirrors, r.Endpoint) + } + + sources := []PullSource{} + for _, ep := range endpoints { + rewritten, err := ep.rewriteReference(ref, r.Prefix) + if err != nil { + return nil, err + } + sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) + } + + return sources, nil +} + +// V1TOMLregistries is for backwards compatibility to sysregistries v1 +type V1TOMLregistries struct { + Registries []string `toml:"registries"` +} + +// V1TOMLConfig is for backwards compatibility to sysregistries v1 +type V1TOMLConfig struct { + Search V1TOMLregistries `toml:"search"` + Insecure V1TOMLregistries `toml:"insecure"` + Block V1TOMLregistries `toml:"block"` +} + +// V1RegistriesConf is the sysregistries v1 configuration format. +type V1RegistriesConf struct { + V1TOMLConfig `toml:"registries"` +} + +// Nonempty returns true if config contains at least one configuration entry. +func (config *V1RegistriesConf) Nonempty() bool { + copy := *config // A shallow copy + if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 { + copy.V1TOMLConfig.Search.Registries = nil + } + if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 { + copy.V1TOMLConfig.Insecure.Registries = nil + } + if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 { + copy.V1TOMLConfig.Block.Registries = nil + } + return !reflect.DeepEqual(copy, V1RegistriesConf{}) +} + +// V2RegistriesConf is the sysregistries v2 configuration format. +type V2RegistriesConf struct { + Registries []Registry `toml:"registry"` + // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references + UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` + // An array of global credential helpers to use for authentication + // (e.g., ["pass", "secretservice"]). The helpers are consulted in the + // specified order. 
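+	// For example (a hypothetical configuration; either helper may be
+	// absent on a given system):
+	//
+	//	credential-helpers = ["pass", "containers-auth.json"]
+	//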
Note that "containers-auth.json" is a reserved + // value for consulting auth files as specified in + // containers-auth.json(5). + // + // If empty, CredentialHelpers defaults to ["containers-auth.json"]. + CredentialHelpers []string `toml:"credential-helpers"` + + // ShortNameMode defines how short-name resolution should be handled by + // _consumers_ of this package. Depending on the mode, the user should + // be prompted with a choice of using one of the unqualified-search + // registries when referring to a short name. + // + // Valid modes are: * "prompt": prompt if stdout is a TTY, otherwise + // use all unqualified-search registries * "enforcing": always prompt + // and error if stdout is not a TTY * "disabled": do not prompt and + // potentially use all unqualified-search registries + ShortNameMode string `toml:"short-name-mode"` + + shortNameAliasConf + + // If you add any field, make sure to update Nonempty() below. +} + +// Nonempty returns true if config contains at least one configuration entry. +func (config *V2RegistriesConf) Nonempty() bool { + copy := *config // A shallow copy + if copy.Registries != nil && len(copy.Registries) == 0 { + copy.Registries = nil + } + if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 { + copy.UnqualifiedSearchRegistries = nil + } + if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 { + copy.CredentialHelpers = nil + } + if !copy.shortNameAliasConf.nonempty() { + copy.shortNameAliasConf = shortNameAliasConf{} + } + return !reflect.DeepEqual(copy, V2RegistriesConf{}) +} + +// parsedConfig is the result of parsing, and possibly merging, configuration files; +// it is the boundary between the process of reading+ingesting the files, and +// later interpreting the configuration based on caller’s requests. +type parsedConfig struct { + // NOTE: Update also parsedConfig.updateWithConfigurationFrom! + + // partialV2 must continue to exist to maintain the return value of TryUpdatingCache + // for compatibility with existing callers. + // We store the authoritative Registries and UnqualifiedSearchRegistries values there as well. + partialV2 V2RegistriesConf + // Absolute path to the configuration file that set the UnqualifiedSearchRegistries. + unqualifiedSearchRegistriesOrigin string + // Result of parsing of partialV2.ShortNameMode. + // NOTE: May be ShortNameModeInvalid to represent ShortNameMode == "" in intermediate values; + // the full configuration in configCache / getConfig() always contains a valid value. + shortNameMode types.ShortNameMode + aliasCache *shortNameAliasCache +} + +// InvalidRegistries represents an invalid registry configurations. An example +// is when "registry.com" is defined multiple times in the configuration but +// with conflicting security settings. +type InvalidRegistries struct { + s string +} + +// Error returns the error string. +func (e *InvalidRegistries) Error() string { + return e.s +} + +// parseLocation parses the input string, performs some sanity checks and returns +// the sanitized input string. An error is returned if the input string is +// empty or if contains an "http{s,}://" prefix. +func parseLocation(input string) (string, error) { + trimmed := strings.TrimRight(input, "/") + + // FIXME: This check needs to exist but fails for empty Location field with + // wildcarded prefix. Removal of this check "only" allows invalid input in, + // and does not prevent correct operation. 
+ // https://github.com/containers/image/pull/1191#discussion_r610122617 + // + // if trimmed == "" { + // return "", &InvalidRegistries{s: "invalid location: cannot be empty"} + // } + // + + if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { + msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) + return "", &InvalidRegistries{s: msg} + } + + return trimmed, nil +} + +// ConvertToV2 returns a v2 config corresponding to a v1 one. +func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { + regMap := make(map[string]*Registry) + // The order of the registries is not really important, but make it deterministic (the same for the same config file) + // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. + registryOrder := []string{} + + getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object + var err error + location, err = parseLocation(location) + if err != nil { + return nil, err + } + reg, exists := regMap[location] + if !exists { + reg = &Registry{ + Endpoint: Endpoint{Location: location}, + Mirrors: []Endpoint{}, + Prefix: location, + } + regMap[location] = reg + registryOrder = append(registryOrder, location) + } + return reg, nil + } + + for _, blocked := range config.V1TOMLConfig.Block.Registries { + reg, err := getRegistry(blocked) + if err != nil { + return nil, err + } + reg.Blocked = true + } + for _, insecure := range config.V1TOMLConfig.Insecure.Registries { + reg, err := getRegistry(insecure) + if err != nil { + return nil, err + } + reg.Insecure = true + } + + res := &V2RegistriesConf{ + UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, + } + for _, location := range registryOrder { + reg := regMap[location] + res.Registries = append(res.Registries, *reg) + } + return res, nil +} + +// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. +var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") + +// postProcess checks the consistency of all the configuration, looks for conflicts, +// and normalizes the configuration (e.g., sets the Prefix to Location if not set). +func (config *V2RegistriesConf) postProcessRegistries() error { + regMap := make(map[string][]*Registry) + + for i := range config.Registries { + reg := &config.Registries[i] + // make sure Location and Prefix are valid + var err error + reg.Location, err = parseLocation(reg.Location) + if err != nil { + return err + } + + if reg.Prefix == "" { + if reg.Location == "" { + return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"} + } + reg.Prefix = reg.Location + } else { + reg.Prefix, err = parseLocation(reg.Prefix) + if err != nil { + return err + } + // FIXME: allow config authors to always use Prefix. 
+ // https://github.com/containers/image/pull/1191#discussion_r610622495 + if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" { + return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"} + } + } + + // make sure mirrors are valid + for _, mir := range reg.Mirrors { + mir.Location, err = parseLocation(mir.Location) + if err != nil { + return err + } + + //FIXME: unqualifiedSearchRegistries now also accepts empty values + //and shouldn't + // https://github.com/containers/image/pull/1191#discussion_r610623216 + if mir.Location == "" { + return &InvalidRegistries{s: "invalid condition: mirror location is unset"} + } + } + if reg.Location == "" { + regMap[reg.Prefix] = append(regMap[reg.Prefix], reg) + } else { + regMap[reg.Location] = append(regMap[reg.Location], reg) + } + } + + // Given a registry can be mentioned multiple times (e.g., to have + // multiple prefixes backed by different mirrors), we need to make sure + // there are no conflicts among them. + // + // Note: we need to iterate over the registries array to ensure a + // deterministic behavior which is not guaranteed by maps. + for _, reg := range config.Registries { + var others []*Registry + var ok bool + if reg.Location == "" { + others, ok = regMap[reg.Prefix] + } else { + others, ok = regMap[reg.Location] + } + if !ok { + return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing") + } + for _, other := range others { + if reg.Insecure != other.Insecure { + msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location) + return &InvalidRegistries{s: msg} + } + + if reg.Blocked != other.Blocked { + msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location) + return &InvalidRegistries{s: msg} + } + } + } + + for i := range config.UnqualifiedSearchRegistries { + registry, err := parseLocation(config.UnqualifiedSearchRegistries[i]) + if err != nil { + return err + } + if !anchoredDomainRegexp.MatchString(registry) { + return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)} + } + config.UnqualifiedSearchRegistries[i] = registry + } + + // Registries are ordered and the first longest prefix always wins, + // rendering later items with the same prefix non-existent. We cannot error + // out anymore as this might break existing users, so let's just ignore them + // to guarantee that the same prefix exists only once. + // + // As a side effect of parsedConfig.updateWithConfigurationFrom, the Registries slice + // is always sorted. To be consistent in situations where it is not called (no drop-ins), + // sort it here as well. + prefixes := []string{} + uniqueRegistries := make(map[string]Registry) + for i := range config.Registries { + // TODO: should we warn if we see the same prefix being used multiple times? + prefix := config.Registries[i].Prefix + if _, exists := uniqueRegistries[prefix]; !exists { + uniqueRegistries[prefix] = config.Registries[i] + prefixes = append(prefixes, prefix) + } + } + sort.Strings(prefixes) + config.Registries = []Registry{} + for _, prefix := range prefixes { + config.Registries = append(config.Registries, uniqueRegistries[prefix]) + } + + return nil +} + +// ConfigPath returns the path to the system-wide registry configuration file. +// Deprecated: This API implies configuration is read from files, and that there is only one. 
+// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
+func ConfigPath(ctx *types.SystemContext) string {
+	return newConfigWrapper(ctx).configPath
+}
+
+// ConfigDirPath returns the path to the directory for drop-in
+// registry configuration files.
+// Deprecated: This API implies configuration is read from directories, and that there is only one.
+// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
+func ConfigDirPath(ctx *types.SystemContext) string {
+	configWrapper := newConfigWrapper(ctx)
+	if configWrapper.userConfigDirPath != "" {
+		return configWrapper.userConfigDirPath
+	}
+	return configWrapper.configDirPath
+}
+
+// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
+// and acts as a key to the internal cache.
+type configWrapper struct {
+	// path to the registries.conf file
+	configPath string
+	// path to system-wide registries.conf.d directory, or "" if not used
+	configDirPath string
+	// path to user specified registries.conf.d directory, or "" if not used
+	userConfigDirPath string
+}
+
+// newConfigWrapper returns a configWrapper for the specified SystemContext.
+func newConfigWrapper(ctx *types.SystemContext) configWrapper {
+	return newConfigWrapperWithHomeDir(ctx, homedir.Get())
+}
+
+// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper,
+// it exists only to allow testing it with an artificial home directory.
+func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
+	var wrapper configWrapper
+	userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
+	userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
+
+	// decide configPath using per-user path or system file
+	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
+		wrapper.configPath = ctx.SystemRegistriesConfPath
+	} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
+		// per-user registries.conf exists, not reading system dir
+		// return config dirs from ctx or per-user one
+		wrapper.configPath = userRegistriesFilePath
+		if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
+			wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
+		} else {
+			wrapper.userConfigDirPath = userRegistriesDirPath
+		}
+
+		return wrapper
+	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+		wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+	} else {
+		wrapper.configPath = systemRegistriesConfPath
+	}
+
+	// potentially use both system and per-user dirs if not using per-user config file
+	if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
+		// dir explicitly chosen: use only that one
+		wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
+	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+		wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
+		wrapper.userConfigDirPath = userRegistriesDirPath
+	} else {
+		wrapper.configDirPath = systemRegistriesConfDirPath
+		wrapper.userConfigDirPath = userRegistriesDirPath
+	}
+
+	return wrapper
+}
+
+// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
+func ConfigurationSourceDescription(ctx *types.SystemContext) string {
+	wrapper := newConfigWrapper(ctx)
+	configSources := []string{wrapper.configPath}
+	if wrapper.configDirPath != "" {
+		configSources = append(configSources, wrapper.configDirPath)
+ } + if wrapper.userConfigDirPath != "" { + configSources = append(configSources, wrapper.userConfigDirPath) + } + return strings.Join(configSources, ", ") +} + +// configMutex is used to synchronize concurrent accesses to configCache. +var configMutex = sync.Mutex{} + +// configCache caches already loaded configs with config paths as keys and is +// used to avoid redundantly parsing configs. Concurrent accesses to the cache +// are synchronized via configMutex. +var configCache = make(map[configWrapper]*parsedConfig) + +// InvalidateCache invalidates the registry cache. This function is meant to be +// used for long-running processes that need to reload potential changes made to +// the cached registry config files. +func InvalidateCache() { + configMutex.Lock() + defer configMutex.Unlock() + configCache = make(map[configWrapper]*parsedConfig) +} + +// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. +func getConfig(ctx *types.SystemContext) (*parsedConfig, error) { + wrapper := newConfigWrapper(ctx) + configMutex.Lock() + if config, inCache := configCache[wrapper]; inCache { + configMutex.Unlock() + return config, nil + } + configMutex.Unlock() + + return tryUpdatingCache(ctx, wrapper) +} + +// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d +// directory. +func dropInConfigs(wrapper configWrapper) ([]string, error) { + var ( + configs []string + dirPaths []string + ) + if wrapper.configDirPath != "" { + dirPaths = append(dirPaths, wrapper.configDirPath) + } + if wrapper.userConfigDirPath != "" { + dirPaths = append(dirPaths, wrapper.userConfigDirPath) + } + for _, dirPath := range dirPaths { + err := filepath.Walk(dirPath, + // WalkFunc to read additional configs + func(path string, info os.FileInfo, err error) error { + switch { + case err != nil: + // return error (could be a permission problem) + return err + case info == nil: + // this should only happen when err != nil but let's be sure + return nil + case info.IsDir(): + if path != dirPath { + // make sure to not recurse into sub-directories + return filepath.SkipDir + } + // ignore directories + return nil + default: + // only add *.conf files + if strings.HasSuffix(path, ".conf") { + configs = append(configs, path) + } + return nil + } + }, + ) + + if err != nil && !os.IsNotExist(err) { + // Ignore IsNotExist errors: most systems won't have a registries.conf.d + // directory. + return nil, errors.Wrapf(err, "reading registries.conf.d") + } + } + + return configs, nil +} + +// TryUpdatingCache loads the configuration from the provided `SystemContext` +// without using the internal cache. On success, the loaded configuration will +// be added into the internal registry cache. +// It returns the resulting configuration; this is DEPRECATED and may not correctly +// reflect any future data handled by this package. +func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { + config, err := tryUpdatingCache(ctx, newConfigWrapper(ctx)) + if err != nil { + return nil, err + } + return &config.partialV2, err +} + +// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper +// argument to avoid redundantly calculating the config paths. 
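+//
+// A minimal sketch of the intended cache interplay from a consuming package
+// (hypothetical caller; a nil SystemContext selects the default paths):
+//
+//	regs, _ := sysregistriesv2.GetRegistries(nil)    // parses and caches
+//	regs, _ = sysregistriesv2.GetRegistries(nil)     // answered from configCache
+//	sysregistriesv2.InvalidateCache()                // drop the cache
+//	conf, _ := sysregistriesv2.TryUpdatingCache(nil) // re-parse and re-populate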
+func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) {
+	configMutex.Lock()
+	defer configMutex.Unlock()
+
+	// load the config
+	config, err := loadConfigFile(wrapper.configPath, false)
+	if err != nil {
+		// Continue with an empty []Registry if we use the default config, which
+		// implies that the config path of the SystemContext isn't set.
+		//
+		// Note: if ctx.SystemRegistriesConfPath points to the default config,
+		// we will still return an error.
+		if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+			config = &parsedConfig{}
+			config.partialV2 = V2RegistriesConf{Registries: []Registry{}}
+			config.aliasCache, err = newShortNameAliasCache("", &shortNameAliasConf{})
+			if err != nil {
+				return nil, err // Should never happen
+			}
+		} else {
+			return nil, errors.Wrapf(err, "loading registries configuration %q", wrapper.configPath)
+		}
+	}
+
+	// Load the configs from the conf directory path.
+	dinConfigs, err := dropInConfigs(wrapper)
+	if err != nil {
+		return nil, err
+	}
+	for _, path := range dinConfigs {
+		// Enforce v2 format for drop-in-configs.
+		dropIn, err := loadConfigFile(path, true)
+		if err != nil {
+			return nil, errors.Wrapf(err, "loading drop-in registries configuration %q", path)
+		}
+		config.updateWithConfigurationFrom(dropIn)
+	}
+
+	if config.shortNameMode == types.ShortNameModeInvalid {
+		config.shortNameMode = defaultShortNameMode
+	}
+
+	if len(config.partialV2.CredentialHelpers) == 0 {
+		config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper}
+	}
+
+	// populate the cache
+	configCache[wrapper] = config
+	return config, nil
+}
+
+// GetRegistries has been deprecated. Use FindRegistry instead.
+//
+// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.partialV2.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+	registries, _, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
+	return registries, err
+}
+
+// UnqualifiedSearchRegistriesWithOrigin returns a list of host[:port] entries
+// to try for unqualified image search, in the returned order. It also returns
+// a human-readable description of where these entries are specified (e.g., a
+// registries.conf file).
+func UnqualifiedSearchRegistriesWithOrigin(ctx *types.SystemContext) ([]string, string, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, "", err
+	}
+	return config.partialV2.UnqualifiedSearchRegistries, config.unqualifiedSearchRegistriesOrigin, nil
+}
+
+// parseShortNameMode translates the string into well-typed
+// types.ShortNameMode.
+func parseShortNameMode(mode string) (types.ShortNameMode, error) {
+	switch mode {
+	case "disabled":
+		return types.ShortNameModeDisabled, nil
+	case "enforcing":
+		return types.ShortNameModeEnforcing, nil
+	case "permissive":
+		return types.ShortNameModePermissive, nil
+	default:
+		return types.ShortNameModeInvalid, errors.Errorf("invalid short-name mode: %q", mode)
+	}
+}
+
+// GetShortNameMode returns the configured types.ShortNameMode.
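+//
+// A short usage sketch (ctx may be nil to consult the system configuration;
+// the mode checked here is just an example):
+//
+//	mode, err := sysregistriesv2.GetShortNameMode(nil)
+//	if err == nil && mode == types.ShortNameModeEnforcing {
+//		// prompt the user before resolving a short name
+//	}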
+func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
+	if ctx != nil && ctx.ShortNameMode != nil {
+		return *ctx.ShortNameMode, nil
+	}
+	config, err := getConfig(ctx)
+	if err != nil {
+		return -1, err
+	}
+	return config.shortNameMode, err
+}
+
+// CredentialHelpers returns the global top-level credential helpers.
+func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
+	config, err := getConfig(sys)
+	if err != nil {
+		return nil, err
+	}
+	return config.partialV2.CredentialHelpers, nil
+}
+
+// refMatchingSubdomainPrefix returns the length of the prefix match within ref
+// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value containing wildcarded subdomains in the
+// format: *.example.com. Wildcards are only accepted at the beginning, so
+// other formats like example.*.com will not work. Wildcarded prefixes also
+// cannot contain port numbers or namespaces in them.
+func refMatchingSubdomainPrefix(ref, prefix string) int {
+	index := strings.Index(ref, prefix[1:])
+	if index == -1 {
+		return -1
+	}
+	if strings.Contains(ref[:index], "/") {
+		return -1
+	}
+	index += len(prefix[1:])
+	if index == len(ref) {
+		return index
+	}
+	switch ref[index] {
+	case ':', '/', '@':
+		return index
+	default:
+		return -1
+	}
+}
+
+// refMatchingPrefix returns the length of the prefix iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchingPrefix(ref, prefix string) int {
+	switch {
+	case strings.HasPrefix(prefix, "*."):
+		return refMatchingSubdomainPrefix(ref, prefix)
+	case len(ref) < len(prefix):
+		return -1
+	case len(ref) == len(prefix):
+		if ref == prefix {
+			return len(prefix)
+		}
+		return -1
+	case len(ref) > len(prefix):
+		if !strings.HasPrefix(ref, prefix) {
+			return -1
+		}
+		c := ref[len(prefix)]
+		// This allows "example.com:5000" to match "example.com",
+		// which is unintended; that will get fixed eventually, DON'T RELY
+		// ON THE CURRENT BEHAVIOR.
+		if c == ':' || c == '/' || c == '@' {
+			return len(prefix)
+		}
+		return -1
+	default:
+		panic("Internal error: impossible comparison outcome")
+	}
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return findRegistryWithParsedConfig(config, ref)
+}
+
+// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded
+// parsedConfig.
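+//
+// Illustrative longest-prefix behavior of FindRegistry (a sketch with
+// hypothetical prefixes): with [[registry]] entries for "example.com" and
+// "example.com/foo" configured,
+//
+//	reg, _ := sysregistriesv2.FindRegistry(nil, "example.com/foo/image:latest")
+//	// reg.Prefix == "example.com/foo" (the longer prefix wins)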
+func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) {
+	reg := Registry{}
+	prefixLen := 0
+	for _, r := range config.partialV2.Registries {
+		if refMatchingPrefix(ref, r.Prefix) != -1 {
+			length := len(r.Prefix)
+			if length > prefixLen {
+				reg = r
+				prefixLen = length
+			}
+		}
+	}
+	if prefixLen != 0 {
+		return &reg, nil
+	}
+	return nil, nil
+}
+
+// loadConfigFile loads and unmarshals a single config file.
+// Use forceV2 if the config must be in the v2 format.
+func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
+	logrus.Debugf("Loading registries configuration %q", path)
+
+	// tomlConfig allows us to unmarshal either V1 or V2 simultaneously.
+	type tomlConfig struct {
+		V2RegistriesConf
+		V1RegistriesConf // for backwards compatibility with sysregistries v1
+	}
+
+	// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
+	var combinedTOML tomlConfig
+	_, err := toml.DecodeFile(path, &combinedTOML)
+	if err != nil {
+		return nil, err
+	}
+
+	if combinedTOML.V1RegistriesConf.Nonempty() {
+		// Enforce the v2 format if requested.
+		if forceV2 {
+			return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
+		}
+
+		// Convert a v1 config into a v2 config.
+		if combinedTOML.V2RegistriesConf.Nonempty() {
+			return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+		}
+		converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
+		if err != nil {
+			return nil, err
+		}
+		combinedTOML.V1RegistriesConf = V1RegistriesConf{}
+		combinedTOML.V2RegistriesConf = *converted
+	}
+
+	res := parsedConfig{partialV2: combinedTOML.V2RegistriesConf}
+
+	// Post process registries, set the correct prefixes, sanity checks, etc.
+	if err := res.partialV2.postProcessRegistries(); err != nil {
+		return nil, err
+	}
+
+	res.unqualifiedSearchRegistriesOrigin = path
+
+	if len(res.partialV2.ShortNameMode) > 0 {
+		mode, err := parseShortNameMode(res.partialV2.ShortNameMode)
+		if err != nil {
+			return nil, err
+		}
+		res.shortNameMode = mode
+	} else {
+		res.shortNameMode = types.ShortNameModeInvalid
+	}
+
+	// Valid wildcarded prefixes must be in the format: *.example.com
+	// FIXME: Move to postProcessRegistries
+	// https://github.com/containers/image/pull/1191#discussion_r610623829
+	for i := range res.partialV2.Registries {
+		prefix := res.partialV2.Registries[i].Prefix
+		if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
+			msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
+			return nil, &InvalidRegistries{s: msg}
+		}
+	}
+
+	// Parse and validate short-name aliases.
+	cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
+	if err != nil {
+		return nil, errors.Wrap(err, "validating short-name aliases")
+	}
+	res.aliasCache = cache
+	// Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and
+	// reduce memory consumption. We're consulting aliasCache for lookups.
+	res.partialV2.shortNameAliasConf = shortNameAliasConf{}
+
+	return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+//
+// Fields present in updates will typically replace already set fields in c.
+// The [[registry]] and alias tables are merged.
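+//
+// A sketch of the merge, assuming a hypothetical drop-in file: a [[registry]]
+// table in registries.conf.d with prefix "example.com" replaces a base entry
+// with the same prefix, entries with other prefixes from both files are kept,
+// and the merged Registries slice is sorted by prefix.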
+func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
+	// == Merge Registries:
+	registryMap := make(map[string]Registry)
+	for i := range c.partialV2.Registries {
+		registryMap[c.partialV2.Registries[i].Prefix] = c.partialV2.Registries[i]
+	}
+	// Merge the freshly loaded registries.
+	for i := range updates.partialV2.Registries {
+		registryMap[updates.partialV2.Registries[i].Prefix] = updates.partialV2.Registries[i]
+	}
+
+	// Go maps have a non-deterministic order when iterating the keys, so
+	// we dump them in a slice and sort it to enforce some order in the
+	// Registries slice. Some consumers of c/image (e.g., CRI-O) log
+	// the configuration where a non-deterministic order could easily cause
+	// confusion.
+	prefixes := []string{}
+	for prefix := range registryMap {
+		prefixes = append(prefixes, prefix)
+	}
+	sort.Strings(prefixes)
+
+	c.partialV2.Registries = []Registry{}
+	for _, prefix := range prefixes {
+		c.partialV2.Registries = append(c.partialV2.Registries, registryMap[prefix])
+	}
+
+	// == Merge UnqualifiedSearchRegistries:
+	// This depends on a subtlety of the behavior of the TOML decoder, where a missing array field
+	// is not modified while unmarshaling (in our case remains nil), while an [] is unmarshaled
+	// as a non-nil []string{}.
+	if updates.partialV2.UnqualifiedSearchRegistries != nil {
+		c.partialV2.UnqualifiedSearchRegistries = updates.partialV2.UnqualifiedSearchRegistries
+		c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin
+	}
+
+	// == Merge credential helpers:
+	if updates.partialV2.CredentialHelpers != nil {
+		c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers
+	}
+
+	// == Merge shortNameMode:
+	// We don’t maintain c.partialV2.ShortNameMode.
+	if updates.shortNameMode != types.ShortNameModeInvalid {
+		c.shortNameMode = updates.shortNameMode
+	}
+
+	// == Merge aliasCache:
+	// We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
+	c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
new file mode 100644
index 00000000000..7e2142b1f58
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -0,0 +1,111 @@
+package tlsclientconfig
+
+import (
+	"crypto/tls"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc.
+func SetupCertificates(dir string, tlsc *tls.Config) error {
+	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
+	fs, err := ioutil.ReadDir(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		if os.IsPermission(err) {
+			logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
+			return nil
+		}
+		return err
+	}
+
+	for _, f := range fs {
+		fullPath := filepath.Join(dir, f.Name())
+		if strings.HasSuffix(f.Name(), ".crt") {
+			logrus.Debugf(" crt: %s", fullPath)
+			data, err := ioutil.ReadFile(fullPath)
+			if err != nil {
+				if os.IsNotExist(err) {
+					// Dangling symbolic link?
+					// Race with someone who deleted the
+					// file after we read the directory's
+					// list of contents?
+					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+					continue
+				}
+				return err
+			}
+			if tlsc.RootCAs == nil {
+				systemPool, err := tlsconfig.SystemCertPool()
+				if err != nil {
+					return errors.Wrap(err, "unable to get system cert pool")
+				}
+				tlsc.RootCAs = systemPool
+			}
+			tlsc.RootCAs.AppendCertsFromPEM(data)
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			certName := f.Name()
+			keyName := certName[:len(certName)-5] + ".key"
+			logrus.Debugf(" cert: %s", fullPath)
+			if !hasFile(fs, keyName) {
+				return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+			if err != nil {
+				return err
+			}
+			tlsc.Certificates = append(tlsc.Certificates, cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			logrus.Debugf(" key: %s", fullPath)
+			if !hasFile(fs, certName) {
+				return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+	return nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+	for _, f := range files {
+		if f.Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// NewTransport creates a default transport.
+func NewTransport() *http.Transport {
+	direct := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+		DualStack: true,
+	}
+	tr := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		DialContext:         direct.DialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
+		DisableKeepAlives: true,
+	}
+	if _, err := sockets.DialerFromEnvironment(direct); err != nil {
+		logrus.Debugf("Can't execute DialerFromEnvironment: %v", err)
+	}
+	return tr
+}
diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go
new file mode 100644
index 00000000000..ba6d875bae6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/load.go
@@ -0,0 +1,211 @@
+package sif
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"github.com/sylabs/sif/v2/pkg/sif"
+)
+
+// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
+const injectedScriptTargetPath = "/podman/runscript"
+
+// parseDefFile parses a SIF definition file from reader,
+// and returns non-trivial contents of the %environment and %runscript sections.
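+//
+// For example, a hypothetical definition file containing
+//
+//	%environment
+//	    export LANG=C
+//	%runscript
+//	    exec echo "hello"
+//
+// yields environment ["export LANG=C"] and runscript [`exec echo "hello"`];
+// comment and blank lines in %environment are skipped.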
+func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. +func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. 
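+	// The generated script chains two steps that must share one fakeroot
+	// session: unsquashfs extracts the squashfs root into extractedRootPath,
+	// and tar repacks it (preserving ACLs and xattrs) into tarPath.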
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). +func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. 
+ logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath) + if err := func() error { // A scope for defer + f, err := os.Create(squashFSPath) + if err != nil { + return err + } + defer f.Close() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil { + return err + } + return nil + }(); err != nil { + return "", nil, err + } + logrus.Debugf("... finished creating a temporary squashfs image") + + if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil { + return "", nil, err + } + succeeded = true + return tarPath, []string{command}, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go new file mode 100644 index 00000000000..ba95a469f36 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -0,0 +1,217 @@ +package sif + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +type sifImageSource struct { + ref sifReference + workDir string + layerDigest digest.Digest + layerSize int64 + layerFile string + config []byte + configDigest digest.Digest + manifest []byte +} + +// getBlobInfo returns the digest, and size of the provided file. +func getBlobInfo(path string) (digest.Digest, int64, error) { + f, err := os.Open(path) + if err != nil { + return "", -1, fmt.Errorf("opening %q for reading: %w", path, err) + } + defer f.Close() + + // TODO: Instead of writing the tar file to disk, and reading + // it here again, stream the tar file to a pipe and + // compute the digest while writing it to disk. + logrus.Debugf("Computing a digest of the SIF conversion output...") + digester := digest.Canonical.Digester() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(digester.Hash(), f) + if err != nil { + return "", -1, fmt.Errorf("reading %q: %w", path, err) + } + digest := digester.Digest() + logrus.Debugf("... finished computing the digest of the SIF conversion output") + + return digest, size, nil +} + +// newImageSource returns an ImageSource for reading from an existing directory. +// newImageSource extracts SIF objects and saves them in a temp directory. 
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Architecture: sifImg.PrimaryArch(), + OS: "linux", + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + return &sifImageSource{ + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + }, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *sifImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
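+//
+// For this source the lookup is purely local (a sketch; the digest value is
+// whichever of the two known digests is requested):
+//
+//	rc, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: layerDigest}, cache)
+//	// returns a reader over the converted tar layer and its size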
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by the sif transport") + } + return nil, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 00000000000..18d894bc35c --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,164 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. 
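+//
+// A minimal usage sketch (the path is hypothetical); "sif:" references name a
+// local file:
+//
+//	ref, err := Transport.ParseReference("/tmp/image.sif")
+//	// equivalent to alltransports.ParseImageName("sif:/tmp/image.sif")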
+var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. +type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.New(`"sif:" locations can only be read from, not written to`) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.New("Deleting images not implemented for sif: images") +} diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go new file mode 100644 index 00000000000..8e9ce0dd236 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -0,0 +1,89 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/opencontainers/go-digest" +) + +// SignOptions includes optional parameters for signing container images. +type SignOptions struct { + // Passphrase to use when signing with the key identity. + Passphrase string +} + +// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity, and the specified options. +func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + sig := newUntrustedSignature(manifestDigest, dockerReference) + + var passphrase string + if options != nil { + passphrase = options.Passphrase + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return nil, errors.New("invalid passphrase: must not contain a line break") + } + } + + return sig.sign(mech, keyIdentity, passphrase) +} + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil) +} + +// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, +// using mech.
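+// An illustrative caller-side sketch (not from the upstream sources; the key blob, signature, manifest and fingerprint below are placeholders): +// +// mech, _, err := NewEphemeralGPGSigningMechanism(publicKeyBlob) +// if err != nil { return err } +// defer mech.Close() +// sig, err := VerifyDockerManifestSignature(signatureBlob, manifestBlob, +// "docker.io/library/busybox:latest", mech, "<KEY FINGERPRINT>")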
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, + expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { + expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) + if err != nil { + return nil, err + } + sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if keyIdentity != expectedKeyIdentity { + return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} + } + return nil + }, + validateSignedDockerReference: func(signedDockerReference string) error { + signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) + if err != nil { + return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} + } + if signedRef.String() != expectedRef.String() { + return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", + signedDockerReference, expectedDockerReference)} + } + return nil + }, + validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { + matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) + if err != nil { + return err + } + if !matches { + return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} + } + return nil + }, + }) + if err != nil { + return nil, err + } + return sig, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/json.go new file mode 100644 index 00000000000..9e592863dae --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/json.go @@ -0,0 +1,88 @@ +package signature + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// jsonFormatError is returned when JSON does not match expected format. +type jsonFormatError string + +func (err jsonFormatError) Error() string { + return string(err) +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to +// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. +// +// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, +// we could use reflection to automate this. Later? +func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { + seenKeys := map[string]struct{}{} + + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return jsonFormatError(err.Error()) + } + if t != json.Delim('{') { + return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) + } + for { + t, err := dec.Token() + if err != nil { + return jsonFormatError(err.Error()) + } + if t == json.Delim('}') { + break + } + + key, ok := t.(string) + if !ok { + // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. 
+ return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) + } + if _, ok := seenKeys[key]; ok { + return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) + } + seenKeys[key] = struct{}{} + + valuePtr := fieldResolver(key) + if valuePtr == nil { + return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) + } + // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. + if err := dec.Decode(valuePtr); err != nil { + return jsonFormatError(err.Error()) + } + } + if _, err := dec.Token(); err != io.EOF { + return jsonFormatError("Unexpected data after JSON object") + } + return nil +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields +// must be present exactly once, and none other fields are accepted. +func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { + seenKeys := map[string]struct{}{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if valuePtr, ok := exactFields[key]; ok { + seenKeys[key] = struct{}{} + return valuePtr + } + return nil + }); err != nil { + return err + } + for key := range exactFields { + if _, ok := seenKeys[key]; !ok { + return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go new file mode 100644 index 00000000000..9a32a43640e --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -0,0 +1,96 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "strings" + + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. +// Each mechanism should eventually be closed by calling Close(). +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. 
+ // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls to this interface, and + // the values may have no recognizable relationship if the public key is not available. + UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) +} + +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + +// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. +type SigningNotSupportedError string + +func (err SigningNotSupportedError) Error() string { + return string(err) +} + +// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg) +// The caller must call .Close() on the returned SigningMechanism. +func NewGPGSigningMechanism() (SigningMechanism, error) { + return newGPGSigningMechanismInDirectory("") +} + +// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + return newEphemeralGPGSigningMechanism(blob) +} + +// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. + md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("The input is not a signature") + } + content, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: An error during reading the body can happen only if + // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key + // to decrypt the contents anyway), or + // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. + return nil, "", err + } + + // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints + // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! 
+ return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go new file mode 100644 index 00000000000..c166fb32d89 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -0,0 +1,203 @@ +//go:build !containers_image_openpgp +// +build !containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/proglottis/gpgme" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. 
+func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
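+// An illustrative sketch (the payload and key fingerprint are placeholders): +// +// sig, err := mech.Sign(payload, "<KEY FINGERPRINT>")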
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go new file mode 100644 index 00000000000..7a31425f199 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -0,0 +1,172 @@ +//go:build containers_image_openpgp +// +build containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + "github.com/containers/storage/pkg/homedir" + // This is a fallback code; the primary recommendation is to use the gpgme mechanism + // implementation, which is out-of-process and more appropriate for handling long-term private key material + // than any Go implementation. + // For this verify-only fallback, we haven't reviewed any of the + // existing alternatives to choose; so, for now, continue to + // use this frozen deprecated implementation. + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. +type openpgpSigningMechanism struct { + keyring openpgp.EntityList +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. 
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? + } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("not signed") + } + content, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted + // (and possibly also signed, but it _must_ be encrypted) and the signing + // “modification detection code” detects a mismatch. But in that case, + // we would expect the signature verification to fail as well, and that is checked + // first. Besides, we are not supplying any decryption keys, so we really + // can never reach this “encrypted data MDC mismatch” path. + return nil, "", err + } + if md.SignatureError != nil { + return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) + } + if md.SignedBy == nil { + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)} + } + if md.Signature != nil { + if md.Signature.SigLifetimeSecs != nil { + expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) + if time.Now().After(expiry) { + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} + } + } + } else if md.SignatureV3 == nil { + // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, + // or sets md.SignatureError. + return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"} + } + + // Uppercase the fingerprint to be compatible with gpgme + return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go new file mode 100644 index 00000000000..82fbb68cb14 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -0,0 +1,777 @@ +// policy_config.go handles creation of policy objects, either by parsing JSON +// or by programs building them programmatically. + +// The New* constructors are intended to be a stable API. FIXME: after an independent review. + +// Do not invoke the internals of the JSON marshaling/unmarshaling directly. + +// We can't just blindly call json.Unmarshal because that would silently ignore +// typos, and that would just not do for security policy. + +// FIXME? 
This is by no means a user-friendly parser: No location information in error messages, no other context. +// But at least it is not worse than blind json.Unmarshal()… + +package signature + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/pkg/errors" +) + +// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). +// You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' +var systemDefaultPolicyPath = builtinDefaultPolicyPath + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/etc/containers/policy.json" + +// userPolicyFile is the path to the per-user policy file. +var userPolicyFile = filepath.FromSlash(".config/containers/policy.json") + +// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. +type InvalidPolicyFormatError string + +func (err InvalidPolicyFormatError) Error() string { + return string(err) +} + +// DefaultPolicy returns the default policy of the system. +// Most applications should be using this method to get the policy configured +// by the system administrator. +// sys should usually be nil, can be set to override the default. +// NOTE: When this function returns an error, report it to the user and abort. +// DO NOT hard-code fallback policies in your application. +func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { + return NewPolicyFromFile(defaultPolicyPath(sys)) +} + +// defaultPolicyPath returns a path to the default policy of the system. +func defaultPolicyPath(sys *types.SystemContext) string { + return defaultPolicyPathWithHomeDir(sys, homedir.Get()) +} + +// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath, +// it exists only to allow testing it with an artificial home directory. +func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string { + if sys != nil && sys.SignaturePolicyPath != "" { + return sys.SignaturePolicyPath + } + userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) + if _, err := os.Stat(userPolicyFilePath); err == nil { + return userPolicyFilePath + } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) + } + return systemDefaultPolicyPath +} + +// NewPolicyFromFile returns a policy configured in the specified file. +func NewPolicyFromFile(fileName string) (*Policy, error) { + contents, err := ioutil.ReadFile(fileName) + if err != nil { + return nil, err + } + policy, err := NewPolicyFromBytes(contents) + if err != nil { + return nil, errors.Wrapf(err, "invalid policy in %q", fileName) + } + return policy, nil +} + +// NewPolicyFromBytes returns a policy parsed from the specified blob. +// Use this function instead of calling json.Unmarshal directly. +func NewPolicyFromBytes(data []byte) (*Policy, error) { + p := Policy{} + if err := json.Unmarshal(data, &p); err != nil { + return nil, InvalidPolicyFormatError(err.Error()) + } + return &p, nil +}
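+ +// An illustrative policy blob that NewPolicyFromBytes accepts (a sketch of the policy.json format; the scope and key path are placeholders): +// +// { +// "default": [{"type": "insecureAcceptAnything"}], +// "transports": { +// "docker": { +// "docker.io/library": [ +// {"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/key.gpg"} +// ] +// } +// } +// } + +// Compile-time check that Policy implements json.Unmarshaler.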
+var _ json.Unmarshaler = (*Policy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (p *Policy) UnmarshalJSON(data []byte) error { + *p = Policy{} + transports := policyTransportsMap{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "default": + return &p.Default + case "transports": + return &transports + default: + return nil + } + }); err != nil { + return err + } + + if p.Default == nil { + return InvalidPolicyFormatError("Default policy is missing") + } + p.Transports = map[string]PolicyTransportScopes(transports) + return nil +} + +// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. +type policyTransportsMap map[string]PolicyTransportScopes + +// Compile-time check that policyTransportsMap implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportsMap)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyTransportScopes{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + // transport can be nil + transport := transports.Get(key) + // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + ptsWithTransport := policyTransportScopesWithTransport{ + transport: transport, + dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. + } + tmpMap[key] = ptsWithTransport.dest + return &ptsWithTransport + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler. +// We want to only use policyTransportScopesWithTransport. +var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { + return errors.New("Do not try to unmarshal PolicyTransportScopes directly") +} + +// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes +// while validating using a specific ImageTransport if not nil. +type policyTransportScopesWithTransport struct { + transport types.ImageTransport + dest *PolicyTransportScopes +} + +// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyRequirements{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok { + return nil + } + if key != "" && m.transport != nil { + if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { + return nil + } + } + ptr := &PolicyRequirements{} // This allocates a new instance on each call. + tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
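+// For illustration, the only JSON object accepted here is: +// +// {"type": "reject"}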
+func (pr *prReject) UnmarshalJSON(data []byte) error { + *pr = prReject{} + var tmp prReject + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeReject { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRReject() + return nil +} + +// newPRSignedBy returns a new prSignedBy if parameters are valid. +func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + if !keyType.IsValid() { + return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType)) + } + if len(keyPath) > 0 && len(keyData) > 0 { + return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") + } + if signedIdentity == nil { + return nil, InvalidPolicyFormatError("signedIdentity not specified") + } + return &prSignedBy{ + prCommon: prCommon{Type: prTypeSignedBy}, + KeyType: keyType, + KeyPath: keyPath, + KeyData: keyData, + SignedIdentity: signedIdentity, + }, nil +} + +// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. +func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, keyPath, nil, signedIdentity) +} + +// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath. +func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) +} + +// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. +func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", keyData, signedIdentity) +} + +// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData. +func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyData(keyType, keyData, signedIdentity) +} + +// Compile-time check that prSignedBy implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface.
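+// For illustration, a JSON object this accepts (exactly one of keyPath and keyData must be present; signedIdentity defaults to matchRepoDigestOrExact when omitted): +// +// {"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/key.gpg"}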
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error { + *pr = prSignedBy{} + var tmp prSignedBy + var gotKeyPath, gotKeyData = false, false + var signedIdentity json.RawMessage + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "type": + return &tmp.Type + case "keyType": + return &tmp.KeyType + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBy { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSignedBy + var err error + switch { + case gotKeyPath && gotKeyData: + return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") + case gotKeyPath && !gotKeyData: + res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyData: + res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyData: + return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified") + default: // Coverage: This should never happen + return errors.Errorf("Impossible keyPath/keyData presence combination!?") + } + if err != nil { + return err + } + *pr = *res + + return nil +} + +// IsValid returns true iff kt is a recognized value. +func (kt sbKeyType) IsValid() bool { + switch kt { + case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, + SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + return true + default: + return false + } +} + +// Compile-time check that sbKeyType implements json.Unmarshaler. +var _ json.Unmarshaler = (*sbKeyType)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (kt *sbKeyType) UnmarshalJSON(data []byte) error { + *kt = sbKeyType("") + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if !sbKeyType(s).IsValid() { + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s)) + } + *kt = sbKeyType(s) + return nil +} + +// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. +func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { + if baseLayerIdentity == nil { + return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") + } + return &prSignedBaseLayer{ + prCommon: prCommon{Type: prTypeSignedBaseLayer}, + BaseLayerIdentity: baseLayerIdentity, + }, nil +} + +// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. +func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedBaseLayer(baseLayerIdentity) +} + +// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface.
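+// For illustration, a JSON object this accepts: +// +// {"type": "signedBaseLayer", "baseLayerIdentity": {"type": "matchRepository"}}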
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { + *pr = prSignedBaseLayer{} + var tmp prSignedBaseLayer + var baseLayerIdentity json.RawMessage + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + "baseLayerIdentity": &baseLayerIdentity, + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBaseLayer { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) + if err != nil { + return err + } + res, err := newPRSignedBaseLayer(bli) + if err != nil { + // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. + return err + } + *pr = *res + return nil +} + +// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. +func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { + var typeField prmCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyReferenceMatch + switch typeField.Type { + case prmTypeMatchExact: + res = &prmMatchExact{} + case prmTypeMatchRepoDigestOrExact: + res = &prmMatchRepoDigestOrExact{} + case prmTypeMatchRepository: + res = &prmMatchRepository{} + case prmTypeExactReference: + res = &prmExactReference{} + case prmTypeExactRepository: + res = &prmExactRepository{} + case prmTypeRemapIdentity: + res = &prmRemapIdentity{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRMMatchExact is NewPRMMatchExact, except it returns the private type. +func newPRMMatchExact() *prmMatchExact { + return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} +} + +// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. +func NewPRMMatchExact() PolicyReferenceMatch { + return newPRMMatchExact() +} + +// Compile-time check that prmMatchExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchExact{} + var tmp prmMatchExact + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *prm = *newPRMMatchExact() + return nil +} + +// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type. +func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { + return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} +} + +// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. +func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { + return newPRMMatchRepoDigestOrExact() +} + +// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
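+// For illustration, the only JSON object accepted here is: +// +// {"type": "matchRepoDigestOrExact"}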
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepoDigestOrExact{} + var tmp prmMatchRepoDigestOrExact + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepoDigestOrExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *prm = *newPRMMatchRepoDigestOrExact() + return nil +} + +// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. +func newPRMMatchRepository() *prmMatchRepository { + return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} +} + +// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. +func NewPRMMatchRepository() PolicyReferenceMatch { + return newPRMMatchRepository() +} + +// Compile-time check that prmMatchRepository implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepository)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepository{} + var tmp prmMatchRepository + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepository { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *prm = *newPRMMatchRepository() + return nil +} + +// newPRMExactReference is NewPRMExactReference, except it returns the private type. +func newPRMExactReference(dockerReference string) (*prmExactReference, error) { + ref, err := reference.ParseNormalizedNamed(dockerReference) + if err != nil { + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error())) + } + if reference.IsNameOnly(ref) { + return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference)) + } + return &prmExactReference{ + prmCommon: prmCommon{Type: prmTypeExactReference}, + DockerReference: dockerReference, + }, nil +} + +// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. +func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { + return newPRMExactReference(dockerReference) +} + +// Compile-time check that prmExactReference implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmExactReference)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmExactReference) UnmarshalJSON(data []byte) error { + *prm = prmExactReference{} + var tmp prmExactReference + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + "dockerReference": &tmp.DockerReference, + }); err != nil { + return err + } + + if tmp.Type != prmTypeExactReference { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + + res, err := newPRMExactReference(tmp.DockerReference) + if err != nil { + return err + } + *prm = *res + return nil +} + +// newPRMExactRepository is NewPRMExactRepository, except it returns the private type. 
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+	}
+	return &prmExactRepository{
+		prmCommon:        prmCommon{Type: prmTypeExactRepository},
+		DockerRepository: dockerRepository,
+	}, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+	return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmExactRepository{}
+	var tmp prmExactRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":             &tmp.Type,
+		"dockerRepository": &tmp.DockerRepository,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactRepository(tmp.DockerRepository)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// Private objects for validateIdentityRemappingPrefix
+var (
+	// remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
+	remapIdentityDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
+	// remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
+	// we need this because reference.NameRegexp accepts short names with docker.io implied.
+	remapIdentityDomainPrefixRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "/")
+	// remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
+	remapIdentityNameRegexp = regexp.MustCompile("^" + reference.NameRegexp.String() + "$")
+)
+
+// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
+// for the Prefix or SignedPrefix values of prmRemapIdentity.
+// Note that it may not recognize _all_ invalid values.
+func validateIdentityRemappingPrefix(s string) error {
+	if remapIdentityDomainRegexp.MatchString(s) ||
+		(remapIdentityNameRegexp.MatchString(s) && remapIdentityDomainPrefixRegexp.MatchString(s)) {
+		// FIXME? This does not reject "shortname" nor "ns/shortname", because docker/reference
+		// does not provide an API for the short vs. long name logic.
+		// It will either not match, or fail in the ParseNamed call of
+		// prmRemapIdentity.remapReferencePrefix when trying to use such a prefix.
+		return nil
+	}
+	return InvalidPolicyFormatError(fmt.Sprintf("prefix %q is not valid", s))
+}
+
+// newPRMRemapIdentity is NewPRMRemapIdentity, except it returns the private type.
+func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) {
+	if err := validateIdentityRemappingPrefix(prefix); err != nil {
+		return nil, err
+	}
+	if err := validateIdentityRemappingPrefix(signedPrefix); err != nil {
+		return nil, err
+	}
+	return &prmRemapIdentity{
+		prmCommon:    prmCommon{Type: prmTypeRemapIdentity},
+		Prefix:       prefix,
+		SignedPrefix: signedPrefix,
+	}, nil
+}
+
+// NewPRMRemapIdentity returns a new "remapIdentity" PolicyReferenceMatch.
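+// Illustrative note (not part of the upstream source): the intent of
+// validateIdentityRemappingPrefix above is that a prefix is either a bare domain
+// ("docker.io", "registry.example.com:5000") or a domain-qualified name
+// ("registry.example.com/ns/repo"). Values that are neither, e.g. a reference carrying
+// a tag or digest, are rejected here; per the FIXME above, short names like "shortname"
+// or "ns/shortname" are not caught, and will either simply not match or fail later in
+// prmRemapIdentity.remapReferencePrefix.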
+func NewPRMRemapIdentity(prefix, signedPrefix string) (PolicyReferenceMatch, error) {
+	return newPRMRemapIdentity(prefix, signedPrefix)
+}
+
+// Compile-time check that prmRemapIdentity implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
+	*prm = prmRemapIdentity{}
+	var tmp prmRemapIdentity
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":         &tmp.Type,
+		"prefix":       &tmp.Prefix,
+		"signedPrefix": &tmp.SignedPrefix,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeRemapIdentity {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
new file mode 100644
index 00000000000..edcbf52f4da
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -0,0 +1,288 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	// (where "appropriate public key" may depend on the contents of the signature);
+	// in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	// in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	// Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	// a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	// signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+	// WARNING: This validates signatures and the manifest, but does not download or validate the
+	// layers. Users must validate that the layers match their expected digests.
+	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+	// matchesDockerReference decides whether a specific image identity is accepted for an image
+	// (or, usually, for the image's Reference().DockerReference()). Note that
+	// image.Reference().DockerReference() may be nil.
+	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+	Policy *Policy
+	state  policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify that users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+	pcInitializing policyContextState = "Initializing"
+	pcReady        policyContextState = "Ready"
+	pcInUse        policyContextState = "InUse"
+	pcDestroying   policyContextState = "Destroying"
+	pcDestroyed    policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+	if pc.state != expected {
+		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+	}
+	pc.state = new
+	return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+	pc := &PolicyContext{Policy: policy, state: pcInitializing}
+	// FIXME: initialize
+	if err := pc.changeState(pcInitializing, pcReady); err != nil {
+		// Huh?! This should never fail, we didn't give the pointer to anybody.
+		// Just give up and leave unclean state around.
+		return nil, err
+	}
+	return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+	if err := pc.changeState(pcReady, pcDestroying); err != nil {
+		return err
+	}
+	// FIXME: destroy
+	return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+	// Do we have a PolicyTransportScopes for this transport?
+	transportName := ref.Transport().Name()
+	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+		// Look for a full match.
+		identity := ref.PolicyConfigurationIdentity()
+		if req, ok := transportScopes[identity]; ok {
+			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+			return req
+		}
+
+		// Look for a match of the possible parent namespaces.
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if req, ok := transportScopes[name]; ok {
+				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+				return req
+			}
+		}
+
+		// Look for a default match for the transport.
+		if req, ok := transportScopes[""]; ok {
+			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+			return req
+		}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: rename Signatures to UnverifiedSignatures
+	// FIXME: pass context.Context
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+ logrus.Debugf("Evaluating signature %d:", sigNumber) + interpretingReqs: + for reqNumber, req := range reqs { + // FIXME: Log the requirement itself? For now, we use just the number. + // FIXME: supply state + switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res { + case sarAccepted: + if as == nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature accepted", reqNumber) + if acceptedSig == nil { + acceptedSig = as + } else if *as != *acceptedSig { // Coverage: this should never happen + // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents? + logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) + rejected = true + acceptedSig = nil + break interpretingReqs + } + case sarRejected: + logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + case sarUnknown: + if err != nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) + default: // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) + rejected = true + break interpretingReqs + } + } + // This also handles the (invalid) case of empty reqs, by rejecting the signature. + if acceptedSig != nil && !rejected { + logrus.Debugf(" Overall: OK, signature accepted") + res = append(res, acceptedSig) + } else { + logrus.Debugf(" Overall: Signature not accepted") + } + } + return res, nil +} + +// IsRunningImageAllowed returns true iff the policy allows running the image. +// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation +// succeeded but the result was rejection. +// WARNING: This validates signatures and the manifest, but does not download or validate the +// layers. Users must validate that the layers match their expected digests. +func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. 
+ logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go new file mode 100644 index 00000000000..55cdd3054f1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? + logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go new file mode 100644 index 00000000000..26cca4759e0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -0,0 +1,130 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? + return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType)) + } + + if pr.KeyPath != "" && pr.KeyData != nil { + return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`) + } + // FIXME: move this to per-context initialization + var data []byte + if pr.KeyData != nil { + data = pr.KeyData + } else { + d, err := ioutil.ReadFile(pr.KeyPath) + if err != nil { + return sarRejected, nil, err + } + data = d + } + + // FIXME: move this to per-context initialization + mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data) + if err != nil { + return sarRejected, nil, err + } + defer mech.Close() + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + for _, trustedIdentity := range trustedIdentities { + if keyIdentity == trustedIdentity { + return nil + } + } + // Coverage: We use a private GPG home directory and only import trusted keys, so this should + // not be reachable. 
+ return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) + }, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + // FIXME: pass context.Context + sigs, err := image.Signatures(ctx) + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go new file mode 100644 index 00000000000..f949088b5f6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. + +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. 
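+	// Illustrative note (not part of the upstream source): in policy.json these two trivial
+	// requirement types are spelled
+	//
+	//	{"type": "insecureAcceptAnything"}
+	//	{"type": "reject"}
+	//
+	// matching the prTypeInsecureAcceptAnything and prTypeReject identifiers defined in
+	// policy_types.go later in this patch.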
+	return sarUnknown, nil, nil
+}
+
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	return true, nil
+}
+
+func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
+}
+
+func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
new file mode 100644
index 00000000000..064866cf6c3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -0,0 +1,154 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+// matchRepoDigestOrExactReferenceValues implements prmMatchRepoDigestOrExact.matchesDockerReference
+// using reference.Named values.
+func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) bool {
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, against UnparsedImage.Manifest.
+ // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest, + // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms) + return signature.Name() == intended.Name() + default: // !reference.IsNameOnly(intended) + return false + } +} +func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return matchRepoDigestOrExactReferenceValues(intended, signature) +} + +func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// parseDockerReferences converts two reference strings into parsed entities, failing on any error +func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) { + r1, err := reference.ParseNormalizedNamed(s1) + if err != nil { + return nil, nil, err + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) + if err != nil { + return false + } + // prm.DockerReference and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// refMatchesPrefix returns true if ref matches prm.Prefix. +func (prm *prmRemapIdentity) refMatchesPrefix(ref reference.Named) bool { + name := ref.Name() + switch { + case len(name) < len(prm.Prefix): + return false + case len(name) == len(prm.Prefix): + return name == prm.Prefix + case len(name) > len(prm.Prefix): + // We are matching only ref.Name(), not ref.String(), so the only separator we are + // expecting is '/': + // - '@' is only valid to separate a digest, i.e. not a part of ref.Name() + // - similarly ':' to mark a tag would not be a part of ref.Name(); it can be a part of a + // host:port domain syntax, but we don't treat that specially and require an exact match + // of the domain. + return strings.HasPrefix(name, prm.Prefix) && name[len(prm.Prefix)] == '/' + default: + panic("Internal error: impossible comparison outcome") + } +} + +// remapReferencePrefix returns the result of remapping ref, if it matches prm.Prefix +// or the original ref if it does not. 
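+// Illustrative examples (not part of the upstream source; names are made up): under
+// refMatchesPrefix above, Prefix "internal.example.com/mirror" matches the name
+// "internal.example.com/mirror" exactly and matches "internal.example.com/mirror/busybox"
+// (the byte after the prefix is '/'), but not "internal.example.com/mirrored/busybox",
+// because only whole path components can match.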
+func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (reference.Named, error) {
+	if !prm.refMatchesPrefix(ref) {
+		return ref, nil
+	}
+	refString := ref.String()
+	newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
+	newParsedRef, err := reference.ParseNamed(newNamedRef)
+	if err != nil {
+		return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
+	}
+	return newParsedRef, nil
+}
+
+func (prm *prmRemapIdentity) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	intended, err = prm.remapReferencePrefix(intended)
+	if err != nil {
+		return false
+	}
+	return matchRepoDigestOrExactReferenceValues(intended, signature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
new file mode 100644
index 00000000000..c6819929bd2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -0,0 +1,163 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default    PolicyRequirements               `json:"default"`
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching to a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// Most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "match-exact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
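+// Illustrative policy.json fragment (not part of the upstream source; the registry name and
+// key path are made up) tying these types to the scope lookup in policy_eval.go's
+// requirementsForImageRef:
+//
+//	{
+//	    "default": [{"type": "reject"}],
+//	    "transports": {
+//	        "docker": {
+//	            "registry.example.com/trusted": [
+//	                {
+//	                    "type": "signedBy",
+//	                    "keyType": "GPGKeys",
+//	                    "keyPath": "/etc/pki/trusted-pubkey.gpg",
+//	                    "signedIdentity": {"type": "matchRepoDigestOrExact"}
+//	                }
+//	            ],
+//	            "": [{"type": "insecureAcceptAnything"}]
+//	        }
+//	    }
+//	}
+//
+// The most specific matching scope wins: images under registry.example.com/trusted must carry
+// a matching GPG signature, any other docker reference falls back to the transport-wide ""
+// scope, and other transports fall back to "default".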
+type prmTypeIdentifier string
+
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+	prmTypeRemapIdentity          prmTypeIdentifier = "remapIdentity"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
+
+// prmRemapIdentity is a PolicyReferenceMatch with type = prmRemapIdentity: like prmMatchRepoDigestOrExact,
+// except that a namespace (at least a host:port, at most a single repository) is substituted before matching the two references.
+type prmRemapIdentity struct {
+	prmCommon
+	Prefix       string `json:"prefix"`
+	SignedPrefix string `json:"signedPrefix"`
+	// Possibly let the users make a choice for tag/digest matching behavior
+	// similar to prmMatchExact/prmMatchRepository?
+}
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go
new file mode 100644
index 00000000000..05bf8229e7d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/signature.go
@@ -0,0 +1,287 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/containers/image/v5/version"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+const (
+	signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+	msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+	return err.msg
+}
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+	DockerManifestDigest digest.Digest
+	DockerReference      string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+ UntrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + UntrustedTimestamp *int64 +} + +// UntrustedSignatureInformation is information available in an untrusted signature. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. + creatorID := "atomic " + version.Version + timestamp := time.Now().Unix() + return untrustedSignature{ + UntrustedDockerManifestDigest: dockerManifestDigest, + UntrustedDockerReference: dockerReference, + UntrustedCreatorID: &creatorID, + UntrustedTimestamp: ×tamp, + } +} + +// Compile-time check that untrustedSignature implements json.Marshaler +var _ json.Marshaler = (*untrustedSignature)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s untrustedSignature) MarshalJSON() ([]byte, error) { + if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]interface{}{ + "type": signatureType, + "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + } + optional := map[string]interface{}{} + if s.UntrustedCreatorID != nil { + optional["creator"] = *s.UntrustedCreatorID + } + if s.UntrustedTimestamp != nil { + optional["timestamp"] = *s.UntrustedTimestamp + } + signature := map[string]interface{}{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that untrustedSignature implements json.Unmarshaler +var _ json.Unmarshaler = (*untrustedSignature)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *untrustedSignature) UnmarshalJSON(data []byte) error { + err := s.strictUnmarshalJSON(data) + if err != nil { + if formatErr, ok := err.(jsonFormatError); ok { + err = InvalidSignatureError{msg: formatErr.Error()} + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. 
+// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore interface{}
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.UntrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
+		}
+		s.UntrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+	}
+
+	var digestString string
+	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+		"docker-reference": &s.UntrustedDockerReference,
+	})
+}
+
+// sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+		return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+	}
+
+	if passphrase != "" {
+		return nil, errors.New("signing mechanism does not support passphrases")
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct { + validateKeyIdentity func(string) error + validateSignedDockerReference func(string) error + validateSignedDockerManifestDigest func(digest.Digest) error +} + +// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components +// match expected values, both as specified by rules, and returns it +func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) { + signed, keyIdentity, err := mech.Verify(unverifiedSignature) + if err != nil { + return nil, err + } + if err := rules.validateKeyIdentity(keyIdentity); err != nil { + return nil, err + } + + var unmatchedSignature untrustedSignature + if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { + return nil, InvalidSignatureError{msg: err.Error()} + } + if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil { + return nil, err + } + // signatureAcceptanceRules have accepted this value. + return &Signature{ + DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest, + DockerReference: unmatchedSignature.UntrustedDockerReference, + }, nil +} + +// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature, +// WITHOUT doing any cryptographic verification. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) { + // NOTE: This should eventually do format autodetection. 
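+	// Illustrative note (not part of the upstream source; the digest, reference, and timestamp
+	// values are placeholders): the JSON payload wrapped by the GPG signature, as produced by
+	// untrustedSignature.MarshalJSON above, looks like
+	//
+	//	{
+	//	    "critical": {
+	//	        "type": "atomic container signature",
+	//	        "image": {"docker-manifest-digest": "sha256:..."},
+	//	        "identity": {"docker-reference": "docker.io/library/busybox:latest"}
+	//	    },
+	//	    "optional": {"creator": "atomic <version>", "timestamp": 1658448000}
+	//	}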
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) + if err != nil { + return nil, err + } + defer mech.Close() + + untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) + if err != nil { + return nil, err + } + var untrustedDecodedContents untrustedSignature + if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { + return nil, InvalidSignatureError{msg: err.Error()} + } + + var timestamp *time.Time // = nil + if untrustedDecodedContents.UntrustedTimestamp != nil { + ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0) + timestamp = &ts + } + return &UntrustedSignatureInformation{ + UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest, + UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference, + UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID, + UntrustedTimestamp: timestamp, + UntrustedShortKeyIdentifier: shortKeyIdentifier, + }, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go new file mode 100644 index 00000000000..c44c9528036 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -0,0 +1,1347 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "bytes" + "context" + "encoding/json" + stderrors "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "sync/atomic" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/tmpdir" + internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob + // with a digest-based name that doesn't match its contents. + // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value), + // and there is no known user of this error. + ErrBlobDigestMismatch = stderrors.New("blob digest mismatch") + // ErrBlobSizeMismatch is returned when PutBlob() is given a blob + // with an expected size that doesn't match the reader. + ErrBlobSizeMismatch = stderrors.New("blob size mismatch") + // ErrNoSuchImage is returned when we attempt to access an image which + // doesn't exist in the storage area. 
+	ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageSource struct {
+	imageRef        storageReference
+	image           *storage.Image
+	systemContext   *types.SystemContext    // SystemContext used in GetBlob() to create temporary files
+	layerPosition   map[digest.Digest]int   // Where we are in reading a blob's layers
+	cachedManifest  []byte                  // A cached copy of the manifest, if already known, or nil
+	getBlobMutex    sync.Mutex              // Mutex to sync state for parallel GetBlob executions
+	SignatureSizes  []int                   `json:"signature-sizes,omitempty"`  // List of sizes of each signature slice
+	SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+}
+
+type storageImageDestination struct {
+	imageRef        storageReference
+	directory       string                   // Temporary directory where we store blobs until Commit() time
+	nextTempFileID  int32                    // A counter that we use for computing filenames to assign to blobs
+	manifest        []byte                   // Manifest contents, temporary
+	manifestDigest  digest.Digest            // Valid if len(manifest) != 0
+	signatures      []byte                   // Signature contents, temporary
+	signatureses    map[digest.Digest][]byte // Instance signature contents, temporary
+	SignatureSizes  []int                    `json:"signature-sizes,omitempty"`  // List of sizes of each signature slice
+	SignaturesSizes map[digest.Digest][]int  `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+
+	// A storage destination may be used concurrently. Accesses are
+	// serialized via a mutex. Please refer to the individual comments
+	// below for details.
+	lock sync.Mutex
+	// Mapping from layer (by index) to the associated ID in the storage.
+	// It's protected *implicitly* since `commitLayer()`, at any given
+	// time, can only be executed by *one* goroutine. Please refer to
+	// `queueOrCommit()` for further details on how the single-caller
+	// guarantee is implemented.
+	indexToStorageID map[int]*string
+	// All accesses to below data are protected by `lock` which is made
+	// *explicit* in the code.
+	blobDiffIDs            map[digest.Digest]digest.Digest                       // Mapping from layer blobsums to their corresponding DiffIDs
+	fileSizes              map[digest.Digest]int64                               // Mapping from layer blobsums to their sizes
+	filenames              map[digest.Digest]string                              // Mapping from layer blobsums to names of files we used to hold them
+	currentIndex           int                                                   // The index of the layer to be committed (i.e., lower indices have already been committed)
+	indexToPulledLayerInfo map[int]*manifest.LayerInfo                           // Mapping from layer (by index) to pulled down blob
+	blobAdditionalLayer    map[digest.Digest]storage.AdditionalLayer             // Mapping from layer blobsums to their corresponding additional layer
+	diffOutputs            map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
+}
+
+type storageImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
+func manifestBigDataKey(digest digest.Digest) string {
+	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably.
+func signatureBigDataKey(digest digest.Digest) string {
+	return "signature-" + digest.Encoded()
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+	// First, locate the image.
+	img, err := imageRef.resolveImage(sys)
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the reader object.
+	image := &storageImageSource{
+		imageRef:        imageRef,
+		systemContext:   sys,
+		image:           img,
+		layerPosition:   make(map[digest.Digest]int),
+		SignatureSizes:  []int{},
+		SignaturesSizes: make(map[digest.Digest][]int),
+	}
+	if img.Metadata != "" {
+		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+			return nil, errors.Wrap(err, "decoding metadata for source image")
+		}
+	}
+	return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+	return nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *storageImageSource) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	if info.Digest == image.GzippedEmptyLayerDigest {
+		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+
+	// NOTE: the blob is first written to a temporary file and subsequently
+	// closed. The intention is to keep the time we own the storage lock
+	// as short as possible to allow other processes to access the storage.
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer rc.Close()
+
+	tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if _, err := io.Copy(tmpFile, rc); err != nil {
+		return nil, 0, err
+	}
+
+	if _, err := tmpFile.Seek(0, 0); err != nil {
+		return nil, 0, err
+	}
+
+	wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
+		defer os.Remove(tmpFile.Name())
+		return tmpFile.Close()
+	})
+
+	return wrapper, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + + // If it's not a layer, then it must be a data item. + if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // Step through the list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. + s.getBlobMutex.Lock() + i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + s.getBlobMutex.Unlock() + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it. + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + if instanceDigest != nil { + key := manifestBigDataKey(*instanceDigest) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil { + return nil, "", errors.Wrapf(err, "reading manifest for image instance %q", *instanceDigest) + } + return blob, manifest.GuessMIMEType(blob), err + } + if len(s.cachedManifest) == 0 { + // The manifest is stored as a big data item. + // Prefer the manifest corresponding to the user-specified digest, if available. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + key := manifestBigDataKey(digested.Digest()) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key + return nil, "", err + } + if err == nil { + s.cachedManifest = blob + } + } + } + // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. + // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). 
+ if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "reading image manifest for %q", s.image.ID) + } + if manifest.MIMETypeIsMultiImage(manifestType) { + return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)") + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing image manifest for %q", s.image.ID) + } + + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + } + + physicalBlobInfos := []types.BlobInfo{} + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "reading layer %q in image %q", layerID, s.image.ID) + } + if layer.UncompressedDigest == "" { + return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + if layer.UncompressedSize < 0 { + return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + layerID = layer.Parent + } + + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + if err != nil { + return nil, errors.Wrapf(err, "creating LayerInfosForCopy of image %q", s.image.ID) + } + return res, nil +} + +// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, +// but using layer data which we can actually produce — physicalInfos for non-empty layers, +// and image.GzippedEmptyLayer for empty ones. +// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+	nextPhysical := 0
+	res := make([]types.BlobInfo, len(manifestInfos))
+	for i, mi := range manifestInfos {
+		if mi.EmptyLayer {
+			res[i] = types.BlobInfo{
+				Digest:    image.GzippedEmptyLayerDigest,
+				Size:      int64(len(image.GzippedEmptyLayer)),
+				MediaType: mi.MediaType,
+			}
+		} else {
+			if nextPhysical >= len(physicalInfos) {
+				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+			}
+			res[i] = physicalInfos[nextPhysical]
+			nextPhysical++
+		}
+	}
+	if nextPhysical != len(physicalInfos) {
+		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+	}
+	return res, nil
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+	var offset int
+	sigslice := [][]byte{}
+	signature := []byte{}
+	signatureSizes := s.SignatureSizes
+	key := "signatures"
+	instance := "default instance"
+	if instanceDigest != nil {
+		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		key = signatureBigDataKey(*instanceDigest)
+		instance = instanceDigest.Encoded()
+	}
+	if len(signatureSizes) > 0 {
+		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+		if err != nil {
+			return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s)", s.image.ID, instance)
+		}
+		signature = signatureBlob
+	}
+	for _, length := range signatureSizes {
+		if offset+length > len(signature) {
+			return nil, errors.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signature))
+		}
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image.
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
+	if err != nil {
+		return nil, errors.Wrapf(err, "creating a temporary directory")
+	}
+	image := &storageImageDestination{
+		imageRef:               imageRef,
+		directory:              directory,
+		signatureses:           make(map[digest.Digest][]byte),
+		blobDiffIDs:            make(map[digest.Digest]digest.Digest),
+		blobAdditionalLayer:    make(map[digest.Digest]storage.AdditionalLayer),
+		fileSizes:              make(map[digest.Digest]int64),
+		filenames:              make(map[digest.Digest]string),
+		SignatureSizes:         []int{},
+		SignaturesSizes:        make(map[digest.Digest][]int),
+		indexToStorageID:       make(map[int]*string),
+		indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
+		diffOutputs:            make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
+	}
+	return image, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up the temporary directory and additional layer store handlers.
+func (s *storageImageDestination) Close() error {
+	for _, al := range s.blobAdditionalLayer {
+		al.Release()
+	}
+	for _, v := range s.diffOutputs {
+		if v.Target != "" {
+			_ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
+		}
+	}
+	return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
+	// We ultimately have to decompress layers to populate trees on disk
+	// and need to explicitly ask for it here, so that the layers' MIME
+	// types can be set accordingly.
+	return types.PreserveOriginal
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is
+// set, the blob will be committed directly, either by the calling goroutine
+// or by another goroutine already committing layers.
+//
+// Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
+// used together. Mixing the two with non "WithOptions" functions is not
+// supported.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) {
+	info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig)
+	if err != nil {
+		return info, err
+	}
+
+	if options.IsConfig || options.LayerIndex == nil {
+		return info, nil
+	}
+
+	return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (s *storageImageDestination) HasThreadSafePutBlob() bool {
+	return true
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// Stores a layer or data blob in our temporary directory, checking that any information
+	// in the blobinfo matches the incoming data.
+	errorBlobInfo := types.BlobInfo{
+		Digest: "",
+		Size:   -1,
+	}
+	if blobinfo.Digest != "" {
+		if err := blobinfo.Digest.Validate(); err != nil {
+			return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+		}
+	}
+
+	// Set up to digest the blob if necessary, and count its size while saving it to a file.
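+	// (The blob is consumed in a single streaming pass: the io.Copy below pulls
+	// data through the decompressor and, when the digest was not supplied, the
+	// digester, while the TeeReader writes the bytes as received through the
+	// byte counter into the temporary file. The file, the blob digest, and the
+	// uncompressed DiffID are thus all produced without buffering the whole
+	// blob in memory.)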
+	filename := s.computeNextBlobCacheFile()
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+	if err != nil {
+		return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename)
+	}
+	defer file.Close()
+	counter := ioutils.NewWriteCounter(file)
+	stream = io.TeeReader(stream, counter)
+	digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+	decompressed, err := archive.DecompressStream(stream)
+	if err != nil {
+		return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob")
+	}
+
+	diffID := digest.Canonical.Digester()
+	// Copy the data to the file.
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	_, err = io.Copy(diffID.Hash(), decompressed)
+	decompressed.Close()
+	if err != nil {
+		return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename)
+	}
+
+	// Determine blob properties, and fail if information that we were given about the blob
+	// is known to be incorrect.
+	blobDigest := digester.Digest()
+	blobSize := blobinfo.Size
+	if blobSize < 0 {
+		blobSize = counter.Count
+	} else if blobinfo.Size != counter.Count {
+		return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
+	}
+
+	// Record information about the blob.
+	s.lock.Lock()
+	s.blobDiffIDs[blobDigest] = diffID.Digest()
+	s.fileSizes[blobDigest] = counter.Count
+	s.filenames[blobDigest] = filename
+	s.lock.Unlock()
+	// This is safe because we have just computed diffID, and blobDigest was either computed
+	// by us, or validated by the caller (usually copy.digestingReader).
+	cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+	return types.BlobInfo{
+		Digest:    blobDigest,
+		Size:      blobSize,
+		MediaType: blobinfo.MediaType,
+	}, nil
+}
+
+// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
+// options.LayerIndex is set, the reused blob will be recorded as already
+// pulled.
+//
+// Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
+// used together. Mixing the two with the non "WithOptions" functions
+// is not supported.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+	reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef)
+	if err != nil || !reused || options.LayerIndex == nil {
+		return reused, info, err
+	}
+
+	return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob.
+// If ref is provided, this function first tries to get the layer from the Additional Layer Store.
+func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) {
+	// lock the entire method as it executes fairly quickly
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if ref != nil {
+		// Check if we have the layer in the underlying additional layer store.
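+		// (An additional layer store, when configured, is a read-only side
+		// store of pre-extracted layers; if it already holds this digest, the
+		// layer can be referenced directly instead of being pulled and
+		// re-applied.)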
+		aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String())
+		if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+			return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest)
+		} else if err == nil {
+			// Record the uncompressed value so that we can use it to calculate layer IDs.
+			s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
+			s.blobAdditionalLayer[blobinfo.Digest] = aLayer
+			return true, types.BlobInfo{
+				Digest:    blobinfo.Digest,
+				Size:      aLayer.CompressedSize(),
+				MediaType: blobinfo.MediaType,
+			}, nil
+		}
+	}
+
+	return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
+}
+
+type zstdFetcher struct {
+	stream   internalTypes.ImageSourceSeekable
+	ctx      context.Context
+	blobInfo types.BlobInfo
+}
+
+// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt.
+func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	var newChunks []internalTypes.ImageSourceChunk
+	for _, v := range chunks {
+		i := internalTypes.ImageSourceChunk{
+			Offset: v.Offset,
+			Length: v.Length,
+		}
+		newChunks = append(newChunks, i)
+	}
+	rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+	if _, ok := err.(internalTypes.BadPartialRequestError); ok {
+		err = chunked.ErrBadRequest{}
+	}
+	return rc, errs, err
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed
+// in a non-sequential way to retrieve the missing chunks.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+	fetcher := zstdFetcher{
+		stream:   stream,
+		ctx:      ctx,
+		blobInfo: srcInfo,
+	}
+
+	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
+	if err != nil {
+		return srcInfo, err
+	}
+
+	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+	if err != nil {
+		return srcInfo, err
+	}
+
+	blobDigest := srcInfo.Digest
+
+	s.lock.Lock()
+	s.blobDiffIDs[blobDigest] = blobDigest
+	s.fileSizes[blobDigest] = 0
+	s.filenames[blobDigest] = ""
+	s.diffOutputs[blobDigest] = out
+	s.lock.Unlock()
+
+	return srcInfo, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + // lock the entire method as it executes fairly quickly + s.lock.Lock() + defer s.lock.Unlock() + + return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) +} + +// tryReusingBlobLocked implements a core functionality of TryReusingBlob. +// This must be called with a lock being held on storageImageDestination. +func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if blobinfo.Digest == "" { + return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) + } + if err := blobinfo.Digest.Validate(); err != nil { + return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`) + } + + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: size, + MediaType: blobinfo.MediaType, + }, nil + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].CompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Does the blob correspond to a known DiffID which we already have available? + // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the + // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. 
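+	// Concretely: when the manifest already records the blob size, the blob is
+	// reused under its original (possibly compressed) digest; otherwise the
+	// caller must allow substitution, and the returned BlobInfo switches to
+	// the uncompressed digest and size instead.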
+ if canSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) + } + if len(layers) > 0 { + if blobinfo.Size != -1 { + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, blobinfo, nil + } + if !canSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + } + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } + } + + // Nope, we don't have it. + return false, types.BlobInfo{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. + if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") +} + +// queueOrCommit queues in the specified blob to be committed to the storage. 
+// If no other goroutine is already committing layers, the layer and all +// subsequent layers (if already queued) will be committed to the storage. +func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error { + // NOTE: whenever the code below is touched, make sure that all code + // paths unlock the lock and to unlock it exactly once. + // + // Conceptually, the code is divided in two stages: + // + // 1) Queue in work by marking the layer as ready to be committed. + // If at least one previous/parent layer with a lower index has + // not yet been committed, return early. + // + // 2) Process the queued-in work by committing the "ready" layers + // in sequence. Make sure that more items can be queued-in + // during the comparatively I/O expensive task of committing a + // layer. + // + // The conceptual benefit of this design is that caller can continue + // pulling layers after an early return. At any given time, only one + // caller is the "worker" routine committing layers. All other routines + // can continue pulling and queuing in layers. + s.lock.Lock() + s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{ + BlobInfo: blob, + EmptyLayer: emptyLayer, + } + + // We're still waiting for at least one previous/parent layer to be + // committed, so there's nothing to do. + if index != s.currentIndex { + s.lock.Unlock() + return nil + } + + for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] { + s.lock.Unlock() + // Note: commitLayer locks on-demand. + if err := s.commitLayer(ctx, *info, index); err != nil { + return err + } + s.lock.Lock() + index++ + } + + // Set the index at the very end to make sure that only one routine + // enters stage 2). + s.currentIndex = index + s.lock.Unlock() + return nil +} + +// commitLayer commits the specified blob with the given index to the storage. +// Note that the previous layer is expected to already be committed. +// +// Caution: this function must be called without holding `s.lock`. Callers +// must guarantee that, at any given time, at most one goroutine may execute +// `commitLayer()`. +func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error { + // Already committed? Return early. + if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { + return nil + } + + // Start with an empty string or the previous layer ID. Note that + // `s.indexToStorageID` can only be accessed by *one* goroutine at any + // given time. Hence, we don't need to lock accesses. + var lastLayer string + if prev := s.indexToStorageID[index-1]; prev != nil { + lastLayer = *prev + } + + // Carry over the previous ID for empty non-base layers. + if blob.EmptyLayer { + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + s.lock.Lock() + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + s.lock.Unlock() + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. 
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // that relies on using a blob digest that has never been seen by the store had better call + // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only + // so far we are going to accommodate that (if we should be doing that at all). + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + // NOTE: use `TryReusingBlob` to prevent recursion. + has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) + if err != nil { + return errors.Wrapf(err, "checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + s.lock.Lock() + diffOutput, ok := s.diffOutputs[blob.Digest] + s.lock.Unlock() + if ok { + layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) + if err != nil { + return err + } + + // FIXME: what to do with the uncompressed digest? + diffOutput.UncompressedDigest = blob.Digest + + if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { + _ = s.imageRef.transport.store.Delete(layer.ID) + return err + } + + s.indexToStorageID[index] = &layer.ID + return nil + } + + s.lock.Lock() + al, ok := s.blobAdditionalLayer[blob.Digest] + s.lock.Unlock() + if ok { + layer, err := al.PutAs(id, lastLayer, nil) + if err != nil { + return errors.Wrapf(err, "failed to put layer from digest and labels") + } + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if we previously cached a file with that blob's contents. If we didn't, + // then we need to read the desired contents from a layer. + s.lock.Lock() + filename, ok := s.filenames[blob.Digest] + s.lock.Unlock() + if !ok { + // Try to find the layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "locating layer for blob %q", blob.Digest) + } + // Read the layer's contents. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "reading layer %q for blob %q", layer, blob.Digest) + } + // Copy the layer diff to a file. Diff() takes a lock that it holds + // until the ReadCloser that it returns is closed, and PutLayer() wants + // the same lock, so the diff can't just be directly streamed from one + // to the other. 
+ filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return errors.Wrapf(err, "creating temporary file %q", filename) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return errors.Wrapf(err, "storing blob to file %q", filename) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.lock.Lock() + s.filenames[blob.Digest] = filename + s.lock.Unlock() + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return errors.Wrapf(err, "opening file %q", filename) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ + OriginalDigest: blob.Digest, + UncompressedDigest: diffID, + }, file) + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { + return errors.Wrapf(err, "adding layer with blob %q", blob.Digest) + } + + s.indexToStorageID[index] = &layer.ID + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") + } + toplevelManifest, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return errors.Wrapf(err, "retrieving top-level manifest") + } + // If the name we're saving to includes a digest, then check that the + // manifests that we're about to save all either match the one from the + // unparsedToplevel, or match the digest in the name that we're using. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + matches, err := manifest.MatchesDigest(s.manifest, digested.Digest()) + if err != nil { + return err + } + if !matches { + matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest()) + if err != nil { + return err + } + } + if !matches { + return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest()) + } + } + } + // Find the list of layer blobs. + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return errors.Wrapf(err, "parsing manifest") + } + layerBlobs := man.LayerInfos() + + // Extract, commit, or find the layers. 
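+	// (Layers are committed strictly in manifest order: commitLayer() for
+	// index i derives its chained ID from the storage ID recorded for index
+	// i-1, roughly digest.Canonical.FromBytes([]byte(parentID+"+"+diffID.Hex())).Hex(),
+	// so a layer can only be created once its parent exists.)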
+ for i, blob := range layerBlobs { + if err := s.commitLayer(ctx, blob, i); err != nil { + return err + } + } + var lastLayer string + if len(layerBlobs) > 0 { // Can happen when using caches + prev := s.indexToStorageID[len(layerBlobs)-1] + if prev == nil { + return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) + } + lastLayer = *prev + } + + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = *inspect.Created + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if errors.Cause(err) != storage.ErrDuplicateID { + logrus.Debugf("error creating image: %q", err) + return errors.Wrapf(err, "creating image %q", intendedID) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return errors.Wrapf(err, "reading image %q", intendedID) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) + } else { + logrus.Debugf("created new image ID %q", img.ID) + } + + // Clean up the unfinished image on any error. + // (Is this the right thing to do if the image has existed before?) + commitSucceeded := false + defer func() { + if !commitSucceeded { + logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames) + if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil { + logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err) + } + } + }() + + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} + } + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "saving big data %q for image %q", blob.String(), img.ID) + } + } + // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below. 
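+	// (Digest-specific keys are built by manifestBigDataKey() above, using the
+	// prefix defined by storage.ImageDigestManifestBigDataNamePrefix, so later
+	// readers can look a manifest up by whichever digest they know; the copy
+	// under the fixed storage.ImageDigestBigDataKey serves readers that don't
+	// specify a digest and keeps compatibility with older readers.)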
+	if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+		manifestDigest, err := manifest.Digest(toplevelManifest)
+		if err != nil {
+			return errors.Wrapf(err, "digesting top-level manifest")
+		}
+		key := manifestBigDataKey(manifestDigest)
+		if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
+			logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
+			return errors.Wrapf(err, "saving top-level manifest for image %q", img.ID)
+		}
+	}
+	// Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
+	// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+	// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+	key := manifestBigDataKey(s.manifestDigest)
+	if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+		logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+		return errors.Wrapf(err, "saving manifest for image %q", img.ID)
+	}
+	key = storage.ImageDigestBigDataKey
+	if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+		logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+		return errors.Wrapf(err, "saving manifest for image %q", img.ID)
+	}
+	// Save the signatures, if we have any.
+	if len(s.signatures) > 0 {
+		if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
+			logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+			return errors.Wrapf(err, "saving signatures for image %q", img.ID)
+		}
+	}
+	for instanceDigest, signatures := range s.signatureses {
+		key := signatureBigDataKey(instanceDigest)
+		if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
+			logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+			return errors.Wrapf(err, "saving signatures for image %q", img.ID)
+		}
+	}
+	// Save our metadata.
+	metadata, err := json.Marshal(s)
+	if err != nil {
+		logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
+		return errors.Wrapf(err, "encoding metadata for image %q", img.ID)
+	}
+	if len(metadata) != 0 {
+		if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+			logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+			return errors.Wrapf(err, "saving metadata for image %q", img.ID)
+		}
+		logrus.Debugf("saved image metadata %q", string(metadata))
+	}
+	// Add the reference's name to the image. We don't need to worry about duplicate
+	// values because AddNames() will deduplicate the list that we pass to it.
+ if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return errors.Wrapf(err, "adding names %v to image %q", name, img.ID) + } + logrus.Debugf("added name %q to image %q", name, img.ID) + } + + commitSucceeded = true + return nil +} + +var manifestMIMETypes = []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return manifestMIMETypes +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + newBlob := make([]byte, len(manifestBlob)) + copy(newBlob, manifestBlob) + s.manifest = newBlob + s.manifestDigest = digest + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (s *storageImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { + return true // Yes, we want the unmodified manifest +} + +// PutSignatures records the image's signatures for committing as a single data blob. +func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + if instanceDigest == nil { + s.signatures = sigblob + s.SignatureSizes = sizes + if len(s.manifest) > 0 { + manifestDigest := s.manifestDigest + instanceDigest = &manifestDigest + } + } + if instanceDigest != nil { + s.signatureses[*instanceDigest] = sigblob + s.SignaturesSizes[*instanceDigest] = sizes + } + return nil +} + +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) getSize() (int64, error) { + var sum int64 + // Size up the data blobs. 
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) + if err != nil { + return -1, errors.Wrapf(err, "reading image %q", s.image.ID) + } + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) + if err != nil { + return -1, errors.Wrapf(err, "reading data blob size %q for %q", dataName, s.image.ID) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Walk the layer list. + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. +func (s *storageImageCloser) Size() (int64, error) { + return s.size, nil +} + +// newImage creates an image that also knows its size +func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, s) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImageCloser{ImageCloser: img, size: size}, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go new file mode 100644 index 00000000000..7c6da112c74 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -0,0 +1,288 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "context" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +// Either "named" or "id" must be set. 
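+// (For illustration: a reference may carry only a name, such as the
+// hypothetical docker.io/library/busybox:latest, only a 64-hex-character
+// image ID, or both at once; newReference below rejects the case where
+// neither is set.)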
+type storageReference struct {
+	transport storageTransport
+	named     reference.Named // may include a tag and/or a digest
+	id        string
+}
+
+func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
+	if named == nil && id == "" {
+		return nil, ErrInvalidReference
+	}
+	if named != nil && reference.IsNameOnly(named) {
+		return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String())
+	}
+	if id != "" {
+		if err := validateImageID(id); err != nil {
+			return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err)
+		}
+	}
+	// We take a copy of the transport, which contains a pointer to the
+	// store that it used for resolving this reference, so that the
+	// transport that we'll return from Transport() won't be affected by
+	// further calls to the original transport's SetStore() method.
+	return &storageReference{
+		transport: transport,
+		named:     named,
+		id:        id,
+	}, nil
+}
+
+// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
+func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
+	repo := ref.Name()
+	for _, name := range image.Names {
+		if named, err := reference.ParseNormalizedNamed(name); err == nil {
+			if named.Name() == repo {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
+// multi-arch manifest that matches the passed-in digest, and is the per-platform
+// image instance that matches sys.
+//
+// See the comment in storageReference.ResolveImage explaining why
+// this check is necessary.
+func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+	// Load the manifest that matches the specified digest.
+	// We don't need to care about storage.ImageDigestBigDataKey because
+	// manifest lists are only stored into storage by c/image versions
+	// that know about manifestBigDataKey, and only using that key.
+	key := manifestBigDataKey(manifestDigest)
+	manifestBytes, err := store.ImageBigData(img.ID, key)
+	if err != nil {
+		return false
+	}
+	// The manifest is either a list, or not a list. If it's a list, find
+	// the digest of the instance that matches the current system, and try
+	// to load that manifest from the image record, and use it.
+	manifestType := manifest.GuessMIMEType(manifestBytes)
+	if !manifest.MIMETypeIsMultiImage(manifestType) {
+		// manifestDigest directly specifies a per-platform image, so we aren't
+		// choosing among different variants.
+		return false
+	}
+	list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+	if err != nil {
+		return false
+	}
+	chosenInstance, err := list.ChooseInstance(sys)
+	if err != nil {
+		return false
+	}
+	key = manifestBigDataKey(chosenInstance)
+	_, err = store.ImageBigData(img.ID, key)
+	return err == nil // true if img.ID is based on chosenInstance.
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID, and return the image.
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
+	var loadedImage *storage.Image
+	if s.id == "" && s.named != nil {
+		// Look for an image that has the expanded reference name as an explicit Name value.
+ image, err := s.transport.store.Image(s.named.String()) + if image != nil && err == nil { + loadedImage = image + s.id = image.ID + } + } + if s.id == "" && s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. + // + // Typically there should be at most one such image, because the same + // manifest digest implies the same config, and we choose the storage ID + // based on the config (deduplicating images), except: + // - the user can explicitly specify an ID when creating the image. + // In this case we don't have a preference among the alternatives. + // - when pulling an image from a multi-platform manifest list, we also + // store the manifest list in the image; this allows referencing a + // per-platform image using the manifest list digest, but that also + // means that we can have multiple genuinely different images in the + // storage matching the same manifest list digest (if pulled using different + // SystemContext.{OS,Architecture,Variant}Choice to the same storage). + // In this case we prefer the image matching the current SystemContext. + images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if err == nil && len(images) > 0 { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) { + loadedImage = image + s.id = image.ID + } + } + } + } + } + } + if s.id == "" { + logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) + } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, errors.Wrapf(err, "reading image %q", s.id) + } + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { + logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) + return nil, ErrNoSuchImage + } + } + // Default to having the image digest that we hand back match the most recently + // added manifest... + if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { + loadedImage.Digest = digest + } + // ... unless the named reference says otherwise, and it matches one of the digests + // in the image. For those cases, set the Digest field to that value, for the + // sake of older consumers that don't know there's a whole list in there now. + if s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + for _, digest := range loadedImage.Digests { + if digest == digested.Digest() { + loadedImage.Digest = digest + break + } + } + } + } + return loadedImage, nil +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, + } +} + +// Return a name with a tag or digest, if we have either, else return it bare. 
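+// (For an ID-only reference, named is nil, so DockerReference returns nil.)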
+func (s storageReference) DockerReference() reference.Named { + return s.named +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +func (s storageReference) PolicyConfigurationIdentity() string { + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" + namespaces := []string{} + if s.named != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. + namespaces = append(namespaces, storeSpec+s.named.String()) + } + tagged, isTagged := s.named.(reference.Tagged) + _, isDigested := s.named.(reference.Digested) + if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. + namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) + } + components := strings.Split(s.named.Name(), "/") + for len(components) > 0 { + namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) + components = components[:len(components)-1] + } + } + namespaces = append(namespaces, storeSpec) + namespaces = append(namespaces, driverlessStoreSpec) + return namespaces +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, sys, s) +} + +func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + img, err := s.resolveImage(sys) + if err != nil { + return err + } + layers, err := s.transport.store.DeleteImage(img.ID, true) + if err == nil { + logrus.Debugf("deleted image %q", img.ID) + for _, layer := range layers { + logrus.Debugf("deleted layer %q", layer) + } + } + return err +} + +func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, s) +} + +func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, s) +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go new file mode 100644 index 00000000000..07393ee7431 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -0,0 +1,388 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + minimumTruncatedIDLength = 3 +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport that uses either a default + // storage.Store or one that it's explicitly told to use. + Transport StoreTransport = &storageTransport{} + // ErrInvalidReference is returned when ParseReference() is passed an + // empty reference. + ErrInvalidReference = errors.New("invalid reference") + // ErrPathNotAbsolute is returned when a graph root is not an absolute + // path name. + ErrPathNotAbsolute = errors.New("path name is not absolute") +) + +// StoreTransport is an ImageTransport that uses a storage.Store to parse +// references, either its own default or one that it's told to use. +type StoreTransport interface { + types.ImageTransport + // SetStore sets the default store for this transport. + SetStore(storage.Store) + // GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet. + GetStoreIfSet() storage.Store + // GetImage retrieves the image from the transport's store that's named + // by the reference. + GetImage(types.ImageReference) (*storage.Image, error) + // GetStoreImage retrieves the image from a specified store that's named + // by the reference. + GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) + // ParseStoreReference parses a reference, overriding any store + // specification that it may contain. + ParseStoreReference(store storage.Store, reference string) (*storageReference, error) + // NewStoreReference creates a reference for (named@ID) in store. + // Either of name or ID can be unset; named must not be a reference.IsNameOnly. + NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) + // SetDefaultUIDMap sets the default UID map to use when opening stores.
+ SetDefaultUIDMap(idmap []idtools.IDMap) + // SetDefaultGIDMap sets the default GID map to use when opening stores. + SetDefaultGIDMap(idmap []idtools.IDMap) + // DefaultUIDMap returns the default UID map used when opening stores. + DefaultUIDMap() []idtools.IDMap + // DefaultGIDMap returns the default GID map used when opening stores. + DefaultGIDMap() []idtools.IDMap +} + +type storageTransport struct { + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet. +func (s *storageTransport) GetStoreIfSet() storage.Store { + return s.store +} + +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + if ref == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + } + ref = ref[closeIndex+1:] + } + + // The reference may end with an image ID. Image IDs and digests use the same "@" separator; + // here we only peel away an image ID, and leave digests alone. + split := strings.LastIndex(ref, "@") + id := "" + if split != -1 { + possibleID := ref[split+1:] + if possibleID == "" { + return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) + } + // If it looks like a digest, leave it alone for now. + if _, err := digest.Parse(possibleID); err != nil { + // Otherwise… + if err := validateImageID(possibleID); err == nil { + id = possibleID // … it is a full ID + } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { + // … it is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. 
+ id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) + } + // We have recognized an image ID; peel it off. + ref = ref[:split] + } + } + + // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") { + if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + var named reference.Named + // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been + // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest. + if ref != "" { + var err error + named, err = reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "parsing named reference %q", ref) + } + named = reference.TagNameOnly(named) + } + + result, err := s.NewStoreReference(store, named, id) + if err != nil { + return nil, err + } + logrus.Debugf("parsed reference into %q", result.StringWithinTransport()) + return result, nil +} + +// NewStoreReference creates a reference for (named@ID) in store. +// Either of name or ID can be unset; named must not be a reference.IsNameOnly. +func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) { + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) +} + +func (s *storageTransport) GetStore() (storage.Store, error) { + // Return the transport's previously-set store. If we don't have one + // of those, initialize one now. + if s.store == nil { + options, err := storage.DefaultStoreOptionsAutoDetectUID() + if err != nil { + return nil, err + } + options.UIDMap = s.defaultUIDMap + options.GIDMap = s.defaultGIDMap + store, err := storage.GetStore(options) + if err != nil { + return nil, err + } + s.store = store + } + return s.store, nil +} + +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"), +// possibly prefixed with a store specifier in the form "[_graphroot_]" or +// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or +// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", +// tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_ value. +func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { + var store storage.Store + // Check if there's a store location prefix. If there is, then it + // needs to match a store that was previously initialized using + // storage.GetStore(), or be enough to let the storage library fill out + // the rest using knowledge that it has from elsewhere.
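+ // For illustration (the driver, paths, and ID here are hypothetical values,
+ // chosen only to show the documented shapes), all of the following are
+ // references this function is expected to accept:
+ //   busybox:latest
+ //   busybox:latest@0123456789abcdef  (a possibly-truncated image ID)
+ //   [overlay@/var/lib/containers/storage+/run/containers/storage]busybox:latest
+ //   [/var/lib/containers/storage]@0123456789abcdef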
+ if len(reference) > 0 && reference[0] == '[' { + closeIndex := strings.IndexRune(reference, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + storeSpec := reference[1:closeIndex] + reference = reference[closeIndex+1:] + // Peel off a "driver@" from the start. + driverInfo := "" + driverSplit := strings.SplitN(storeSpec, "@", 2) + if len(driverSplit) != 2 { + if storeSpec == "" { + return nil, ErrInvalidReference + } + } else { + driverInfo = driverSplit[0] + if driverInfo == "" { + return nil, ErrInvalidReference + } + storeSpec = driverSplit[1] + if storeSpec == "" { + return nil, ErrInvalidReference + } + } + // Peel off a ":options" from the end. + var options []string + optionsSplit := strings.SplitN(storeSpec, ":", 2) + if len(optionsSplit) == 2 { + options = strings.Split(optionsSplit[1], ",") + storeSpec = optionsSplit[0] + } + // Peel off a "+runroot" from the new end. + runRootInfo := "" + runRootSplit := strings.SplitN(storeSpec, "+", 2) + if len(runRootSplit) == 2 { + runRootInfo = runRootSplit[1] + storeSpec = runRootSplit[0] + } + // The rest is our graph root. + rootInfo := storeSpec + // Check that any paths are absolute paths. + if rootInfo != "" && !filepath.IsAbs(rootInfo) { + return nil, ErrPathNotAbsolute + } + if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: driverInfo, + GraphRoot: rootInfo, + RunRoot: runRootInfo, + GraphDriverOptions: options, + UIDMap: s.defaultUIDMap, + GIDMap: s.defaultGIDMap, + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // We didn't have a store spec, so use the default. + store2, err := s.GetStore() + if err != nil { + return nil, err + } + store = store2 + } + return s.ParseStoreReference(store, reference) +} + +func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { + dref := ref.DockerReference() + if dref != nil { + if img, err := store.Image(dref.String()); err == nil { + return img, nil + } + } + if sref, ok := ref.(*storageReference); ok { + tmpRef := *sref + if img, err := tmpRef.resolveImage(nil); err == nil { + return img, nil + } + } + return nil, storage.ErrImageUnknown +} + +func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + return s.GetStoreImage(store, ref) +} + +func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { + // Check that there's a store location prefix. Values we're passed are + // expected to come from PolicyConfigurationIdentity or + // PolicyConfigurationNamespaces, so if there's no store location, + // something's wrong. + if scope[0] != '[' { + return ErrInvalidReference + } + // Parse the store location prefix. + closeIndex := strings.IndexRune(scope, ']') + if closeIndex < 1 { + return ErrInvalidReference + } + storeSpec := scope[1:closeIndex] + scope = scope[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return ErrPathNotAbsolute + } + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return ErrPathNotAbsolute + } + } else { + // Anything else: scope specified in a form we don't + // recognize. 
+ return ErrInvalidReference + } + // That might be all of it, and that's okay. + if scope == "" { + return nil + } + + fields := strings.SplitN(scope, "@", 3) + switch len(fields) { + case 1: // name only + case 2: // name:tag@ID or name[:tag]@digest + if idErr := validateImageID(fields[1]); idErr != nil { + if _, digestErr := digest.Parse(fields[1]); digestErr != nil { + return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error()) + } + } + case 3: // name[:tag]@digest@ID + if _, err := digest.Parse(fields[1]); err != nil { + return err + } + if err := validateImageID(fields[2]); err != nil { + return err + } + default: // Coverage: This should never happen + return errors.New("Internal error: unexpected number of fields from strings.SplitN") + } + // As for fields[0], if it is non-empty at all: + // FIXME? We could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// validateImageID returns nil if id is a valid (full) image ID, or an error +func validateImageID(id string) error { + _, err := digest.Parse("sha256:" + id) + return err +} diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go new file mode 100644 index 00000000000..e9d321b8f87 --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -0,0 +1,60 @@ +// Package tarball provides a way to generate images using one or more layer +// tarballs and an optional template configuration. +// +// An example: +// package main +// +// import ( +// "context" +// +// cp "github.com/containers/image/v5/copy" +// "github.com/containers/image/v5/signature" +// "github.com/containers/image/v5/tarball" +// "github.com/containers/image/v5/transports/alltransports" +// "github.com/containers/image/v5/types" +// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +// ) +// +// func imageFromTarball() { +// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// // - or - +// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// if err != nil { +// panic(err) +// } +// updater, ok := src.(tarball.ConfigUpdater) +// if !ok { +// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") +// } +// config := imgspecv1.Image{ +// Config: imgspecv1.ImageConfig{ +// Cmd: []string{"/bin/bash"}, +// }, +// } +// annotations := make(map[string]string) +// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" +// err = updater.ConfigUpdate(config, annotations) +// if err != nil { +// panic(err) +// } +// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") +// if err != nil { +// panic(err) +// } +// +// policy, err := signature.DefaultPolicy(nil) +// if err != nil { +// panic(err) +// } +// +// pc, err := signature.NewPolicyContext(policy) +// if err != nil { +// panic(err) +// } +// defer pc.Destroy() +// _, err = cp.Image(context.TODO(), pc, dest, src, nil) +// if err != nil { +// panic(err) +// } +// } +package tarball diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go new file mode 100644 index 00000000000..23f67c49e62 --- /dev/null +++
b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -0,0 +1,93 @@ +package tarball + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/types" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ConfigUpdater is an interface that ImageReferences for "tarball" images also +// implement. It can be used to set values for a configuration, and to set +// image annotations which will be present in the images returned by the +// reference's NewImage() or NewImageSource() methods. +type ConfigUpdater interface { + ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error +} + +type tarballReference struct { + config imgspecv1.Image + annotations map[string]string + filenames []string + stdin []byte +} + +// ConfigUpdate updates the image's default configuration and adds annotations +// which will be visible in source images created using this reference. +func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { + r.config = config + if r.annotations == nil { + r.annotations = make(map[string]string) + } + for k, v := range annotations { + r.annotations[k] = v + } + return nil +} + +func (r *tarballReference) Transport() types.ImageTransport { + return Transport +} + +func (r *tarballReference) StringWithinTransport() string { + return strings.Join(r.filenames, ":") +} + +func (r *tarballReference) DockerReference() reference.Named { + return nil +} + +func (r *tarballReference) PolicyConfigurationIdentity() string { + return "" +} + +func (r *tarballReference) PolicyConfigurationNamespaces() []string { + return nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go new file mode 100644 index 00000000000..694ad17bd1a --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -0,0 +1,274 @@ +package tarball + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/v5/types" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. + filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. 
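+		// In other words: for a gzip-compressed layer, the blob digest computed above
+		// covers the compressed bytes, while the diffID computed below covers the
+		// uncompressed stream (matching rootfs.diff_ids in the OCI image config); for
+		// an uncompressed layer the two digests coincide, which is why diffIDdigester
+		// is simply aliased to blobIDdigester in that branch.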
+ diffIDdigester := digest.Canonical.Digester() + uncompressed, err := pgzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. 
+ src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go new file mode 100644 index 00000000000..d407c657fae --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -0,0 +1,75 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + return NewReference(filenames, stdin) +} + +// NewReference creates a new "tarball:" reference for the listed fileNames. +// If any of the fileNames is "-", the contents of stdin are used instead. +func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) { + for _, path := range fileNames { + if strings.Contains(path, separator) { + return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator) + } + } + return &tarballReference{ + filenames: fileNames, + stdin: stdin, + }, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go new file mode 100644 index 00000000000..0bae8b2599d --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -0,0 +1,48 @@ +package alltransports + +import ( + "strings" + + // register all known transports + // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating + // a transport. 
+ _ "github.com/containers/image/v5/directory" + _ "github.com/containers/image/v5/docker" + _ "github.com/containers/image/v5/docker/archive" + _ "github.com/containers/image/v5/oci/archive" + _ "github.com/containers/image/v5/oci/layout" + _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" + _ "github.com/containers/image/v5/tarball" + + // The ostree transport is registered by ostree*.go + // The storage transport is registered by storage*.go + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// ParseImageName converts a URL-like image name to a types.ImageReference. +func ParseImageName(imgName string) (types.ImageReference, error) { + // Keep this in sync with TransportFromImageName! + parts := strings.SplitN(imgName, ":", 2) + if len(parts) != 2 { + return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) + } + transport := transports.Get(parts[0]) + if transport == nil { + return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) + } + return transport.ParseReference(parts[1]) +} + +// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when +// the transport is unknown or when the input is invalid. +func TransportFromImageName(imageName string) types.ImageTransport { + // Keep this in sync with ParseImageName! + parts := strings.SplitN(imageName, ":", 2) + if len(parts) == 2 { + return transports.Get(parts[0]) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go new file mode 100644 index 00000000000..ffac6e0b8a3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go @@ -0,0 +1,9 @@ +//go:build !containers_image_docker_daemon_stub +// +build !containers_image_docker_daemon_stub + +package alltransports + +import ( + // Register the docker-daemon transport + _ "github.com/containers/image/v5/docker/daemon" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go new file mode 100644 index 00000000000..ddc347bf35d --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go @@ -0,0 +1,10 @@ +//go:build containers_image_docker_daemon_stub +// +build containers_image_docker_daemon_stub + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("docker-daemon")) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go new file mode 100644 index 00000000000..2340702bdc5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go @@ -0,0 +1,9 @@ +//go:build containers_image_ostree && linux +// +build containers_image_ostree,linux + +package alltransports + +import ( + // Register the ostree transport + _ "github.com/containers/image/v5/ostree" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go new 
file mode 100644 index 00000000000..8c4175188f0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go @@ -0,0 +1,10 @@ +//go:build !containers_image_ostree || !linux +// +build !containers_image_ostree !linux + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("ostree")) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go new file mode 100644 index 00000000000..1e399cdb024 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go @@ -0,0 +1,9 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/v5/storage" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go new file mode 100644 index 00000000000..30802661f17 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go @@ -0,0 +1,10 @@ +//go:build containers_image_storage_stub +// +build containers_image_storage_stub + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/v5/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go new file mode 100644 index 00000000000..2c186a90cca --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/stub.go @@ -0,0 +1,36 @@ +package transports + +import ( + "fmt" + + "github.com/containers/image/v5/types" +) + +// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +type stubTransport string + +// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +func NewStubTransport(name string) types.ImageTransport { + return stubTransport(name) +} + +// Name returns the name of the transport, which must be unique among other transports. +func (s stubTransport) Name() string { + return string(s) +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { + return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { + // Allowing any reference in here allows tools with some transports stubbed-out to still + // use signature verification policies which refer to these stubbed-out transports. 
+ // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . + return nil +} diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go new file mode 100644 index 00000000000..46ee3710fcb --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/transports.go @@ -0,0 +1,90 @@ +package transports + +import ( + "fmt" + "sort" + "sync" + + "github.com/containers/image/v5/types" +) + +// knownTransports is a registry of known ImageTransport instances. +type knownTransports struct { + transports map[string]types.ImageTransport + mu sync.Mutex +} + +func (kt *knownTransports) Get(k string) types.ImageTransport { + kt.mu.Lock() + t := kt.transports[k] + kt.mu.Unlock() + return t +} + +func (kt *knownTransports) Remove(k string) { + kt.mu.Lock() + delete(kt.transports, k) + kt.mu.Unlock() +} + +func (kt *knownTransports) Add(t types.ImageTransport) { + kt.mu.Lock() + defer kt.mu.Unlock() + name := t.Name() + if t := kt.transports[name]; t != nil { + panic(fmt.Sprintf("Duplicate image transport name %s", name)) + } + kt.transports[name] = t +} + +var kt *knownTransports + +func init() { + kt = &knownTransports{ + transports: make(map[string]types.ImageTransport), + } +} + +// Get returns the transport specified by name or nil when unavailable. +func Get(name string) types.ImageTransport { + return kt.Get(name) +} + +// Delete deletes a transport from the registered transports. +func Delete(name string) { + kt.Remove(name) +} + +// Register registers a transport. +func Register(t types.ImageTransport) { + kt.Add(t) +} + +// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that +// ParseImageName(ImageName(reference)) returns an equivalent reference. +// +// This is the generally recommended way to refer to images in the UI. +// +// NOTE: The returned string is not promised to be equal to the original input to ParseImageName; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +func ImageName(ref types.ImageReference) string { + return ref.Transport().Name() + ":" + ref.StringWithinTransport() +} + +// ListNames returns a list of non-deprecated transport names. +// Deprecated transports can be used, but are not presented to users. +func ListNames() []string { + kt.mu.Lock() + defer kt.mu.Unlock() + deprecated := map[string]bool{ + "atomic": true, + } + var names []string + for _, transport := range kt.transports { + if !deprecated[transport.Name()] { + names = append(names, transport.Name()) + } + } + sort.Strings(names) + return names +} diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go new file mode 100644 index 00000000000..dcff8caf76b --- /dev/null +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -0,0 +1,694 @@ +package types + +import ( + "context" + "io" + "time" + + "github.com/containers/image/v5/docker/reference" + compression "github.com/containers/image/v5/pkg/compression/types" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageTransport is a top-level namespace for ways to store/load an image. +// It should generally correspond to ImageSource/ImageDestination implementations.
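+// For example (an illustrative, not exhaustive, list): the "docker" transport
+// refers to images in a registry, "containers-storage" to images in local
+// storage, and "tarball" to images assembled from layer tarballs; in each case
+// transports.ImageName() renders a reference as
+// "<transport name>:<reference within transport>".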
+// +// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. +// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS +// (or, even, IPv4 or IPv6). +// +// OTOH all images using the same transport should (apart from versions of the image format) be interoperable. +// For example, several different ImageTransport implementations may be based on local filesystem paths, +// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) +// +// See also transports.KnownTransports. +type ImageTransport interface { + // Name returns the name of the transport, which must be unique among other transports. + Name() string + // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. + ParseReference(reference string) (ImageReference, error) + // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys + // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). + // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. + // scope passed to this function will not be "", that value is always allowed. + ValidatePolicyConfigurationScope(scope string) error +} + +// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. +// +// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening +// within an ImageTransport.ParseReference() or equivalent API creating the reference object. +// That's also why the various identification/formatting methods of this type do not support returning errors. +// +// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside +// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. +type ImageReference interface { + Transport() ImageTransport + // StringWithinTransport returns a string representation of the reference, which MUST be such that + // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. + // NOTE: The returned string is not promised to be equal to the original input to ParseReference; + // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; + // instead, see transports.ImageName(). + StringWithinTransport() string + + // DockerReference returns a Docker reference associated with this reference + // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, + // not e.g. after redirect or alias processing), or nil if unknown/not applicable. + DockerReference() reference.Named + + // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. + // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; + // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical + // (i.e.
various references with exactly the same semantics should return the same configuration identity) + // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but + // not required/guaranteed that it will be a valid input to Transport().ParseReference(). + // Returns "" if configuration identities for these references are not supported. + PolicyConfigurationIdentity() string + + // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search + // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed + // in order, terminating on first match, and an implicit "" is always checked at the end. + // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), + // and each following element to be a prefix of the element preceding it. + PolicyConfigurationNamespaces() []string + + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. + // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) + // NewImageSource returns a types.ImageSource for this reference. + // The caller must call .Close() on the returned ImageSource. + NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) + // NewImageDestination returns a types.ImageDestination for this reference. + // The caller must call .Close() on the returned ImageDestination. + NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error) + + // DeleteImage deletes the named image from the registry, if supported. + DeleteImage(ctx context.Context, sys *SystemContext) error +} + +// LayerCompression indicates if layers must be compressed, decompressed or preserved +type LayerCompression int + +const ( + // PreserveOriginal indicates the layer must be preserved, i.e. + // no compression or decompression. + PreserveOriginal LayerCompression = iota + // Decompress indicates the layer must be decompressed + Decompress + // Compress indicates the layer must be compressed + Compress +) + +// LayerCrypto indicates if layers have been encrypted, decrypted, or neither +type LayerCrypto int + +const ( + // PreserveOriginalCrypto indicates the layer must be preserved, i.e. + // no encryption/decryption + PreserveOriginalCrypto LayerCrypto = iota + // Encrypt indicates the layer is encrypted + Encrypt + // Decrypt indicates the layer is decrypted + Decrypt +) + +// BlobInfo collects known information about a blob (layer/config). +// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. +type BlobInfo struct { + Digest digest.Digest // "" if unknown. + Size int64 // -1 if unknown + URLs []string + Annotations map[string]string + MediaType string + // CompressionOperation is used in Image.UpdateLayerInfos to instruct + // whether the original layer's "compressed or not" should be preserved, + // possibly while changing the compression algorithm from one to another, + // or if it should be compressed or decompressed.
The field defaults to + // preserve the original layer's compressedness. + // TODO: To remove together with CryptoOperation in re-design to remove + // field out of BlobInfo. + CompressionOperation LayerCompression + // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct + // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be + // set when `CompressionOperation == Compress` and MAY be set when + // `CompressionOperation == PreserveOriginal` and the compression type is + // being changed for an already-compressed layer. + CompressionAlgorithm *compression.Algorithm + // CryptoOperation is used in Image.UpdateLayerInfos to instruct + // whether the original layer was encrypted/decrypted + // TODO: To remove together with CompressionOperation in re-design to + // remove field out of BlobInfo. + CryptoOperation LayerCrypto +} + +// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. +// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest). +// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. +// +// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different +// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, +// at least by not failing hard when encountering unknown data. +type BICTransportScope struct { + Opaque string +} + +// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope. +// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob +// can look it up using BlobInfoCache.CandidateLocations. +// +// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different +// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, +// at least by not failing hard when encountering unknown data. +type BICLocationReference struct { + Opaque string +} + +// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations. +type BICReplacementCandidate struct { + Digest digest.Digest + Location BICLocationReference +} + +// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies. +// +// It records two kinds of data: +// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: +// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. +// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression), +// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload). +// +// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known +// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value). +// +// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently +// compress/decompress blobs for their own purposes.
+// +// - Known blob locations, managed by individual transports: +// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob), +// recording transport-specific information that allows the transport to reuse the blob in the future; +// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused. +// +// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs +// can be directly reused within a registry, or mounted across registries within a registry server.) +// +// None of the methods return an error indication: errors when reading from, or writing to, the cache should not be fatal; +// users of the cache should just fall back to copying the blobs the usual way. +// +// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by +// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own. +type BlobInfoCache interface { + // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. + // May return anyDigest if it is known to be uncompressed. + // Returns "" if nothing is known about the digest (it may be compressed or uncompressed). + UncompressedDigest(anyDigest digest.Digest) digest.Digest + // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest. + // It’s allowed for anyDigest == uncompressed. + // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. + // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. + // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) + RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) + + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, + // and can be reused given the opaque location data. + RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference) + // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused + // within the specified (transport scope) (if they still exist, which is not guaranteed). + // + // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, + // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same + // uncompressed digest. + CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate +} + +// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). +// This is primarily useful for copying images around; for examining their properties, Image (below) +// is usually more useful. +// Each ImageSource should eventually be closed by calling Close().
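+// A minimal usage sketch (illustrative only; ref is assumed to be a
+// types.ImageReference obtained elsewhere, and error handling is elided):
+//	src, err := ref.NewImageSource(ctx, sys)
+//	defer src.Close()
+//	manifestBytes, mimeType, err := src.GetManifest(ctx, nil)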
+// +// WARNING: Various methods which return an object identified by digest generally do not +// validate that the returned data actually matches that digest; this is the caller’s responsibility. +type ImageSource interface { + // Reference returns the reference used to set up this source, _as specified by the user_ + // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. + Reference() ImageReference + // Close removes resources associated with an initialized ImageSource, if any. + Close() error + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). + // It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool + // GetSignatures returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer + // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() + // to read the image's layers. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error) +} + +// ImageDestination is a service, possibly remote (= slow), to store components of a single image. +// +// There is a specific required order for some of the calls: +// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) +// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. 
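+// (Illustration of the required ordering: a typical push therefore calls TryReusingBlob/PutBlob for each layer and for the config blob, then PutManifest, then PutSignatures if there are any, and finally Commit.)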
+// +// Each ImageDestination should eventually be closed by calling Close(). +type ImageDestination interface { + // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, + // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. + Reference() ImageReference + // Close removes resources associated with an initialized ImageDestination, if any. + Close() error + + // SupportedManifestMIMETypes tells which manifest MIME types the destination supports. + // If an empty slice or nil is returned, then any MIME type can be tried for upload. + SupportedManifestMIMETypes() []string + // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. + // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. + SupportsSignatures(ctx context.Context) error + // DesiredLayerCompression indicates the kind of compression to apply on layers + DesiredLayerCompression() LayerCompression + // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually + // uploaded to the image destination, true otherwise. + AcceptsForeignLayerURLs() bool + // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. + MustMatchRuntimeOS() bool + // IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), + // and would prefer to receive an unmodified manifest instead of one modified for the destination. + // Does not make a difference if Reference().DockerReference() is nil. + IgnoresEmbeddedDockerReference() bool + + // PutBlob writes contents of stream and returns data representing the result. + // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. + // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. + // May update cache. + // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available + // to any other readers for download using the supplied digest. + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. + PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) + // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. + HasThreadSafePutBlob() bool + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination + // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). + // info.Digest must not be empty. + // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may + // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be + // reflected in the manifest that will be written. + // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. + // May use and/or update cache. + TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) + // PutManifest writes manifest to the destination. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for + // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. + // It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated + // by `manifest.Digest()`. + // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. + // If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema), + // and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. + PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error + // PutSignatures writes a set of signatures to the destination. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for + // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. + // MUST be called after PutManifest (signatures may reference manifest contents). + PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error + // Commit marks the process of storing the image as successful and asks for the image to be persisted. + // unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list + // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the + // original manifest list digest, if desired. + // WARNING: This does not have any transactional semantics: + // - Uploaded data MAY be visible to others before Commit() is called + // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) + Commit(ctx context.Context, unparsedToplevel UnparsedImage) error +} + +// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available, +// refuses specifically this manifest type, but may accept a different manifest type. +type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. + Err error +} + +func (e ManifestTypeRejectedError) Error() string { + return e.Err.Error() +} + +// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, +// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. +// This also makes the UnparsedImage→Image conversion an explicitly visible step. +// +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +type UnparsedImage interface { + // Reference returns the reference used to set up this source, _as specified by the user_ + // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. + Reference() ImageReference + // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. + Manifest(ctx context.Context) ([]byte, string, error) + // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. + Signatures(ctx context.Context) ([][]byte, error) +} + +// Image is the primary API for inspecting properties of images. +// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The Image must not be used after the underlying ImageSource is Close()d. +type Image interface { + // Note that Reference may return nil in the return value of UpdatedImage! + UnparsedImage + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob(context.Context) ([]byte, error) + // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about + // layers in the resulting configuration isn't guaranteed to be returned due to how + // old image manifests work (docker v2s1 especially). + OCIConfig(context.Context) (*v1.Image, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []BlobInfo + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(context.Context) ([]BlobInfo, error) + // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with the destination reference. + // It returns false if the manifest does not embed a Docker reference. + // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool + // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. + Inspect(context.Context) (*ImageInspectInfo, error) + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. + // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. + // This does not change the state of the original Image object. + // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if + // manifests of type options.ManifestMIMEType can not include layers that are compressed + // in accordance with the CompressionOperation and CompressionAlgorithm specified in one + // or more options.LayerInfos items, though retrying with a different + // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm + // values might succeed. + UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) + // SupportsEncryption returns an indicator that the image supports encryption + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 + SupportsEncryption(ctx context.Context) bool + // Size returns an approximation of the amount of disk space which is consumed by the image in its current + // location. If the size is not known, -1 will be returned. + Size() (int64, error) +} + +// ImageCloser is an Image with a Close() method which must be called by the user. +// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, +// to ensure that the ImageSource is closed. +type ImageCloser interface { + Image + // Close removes resources associated with an initialized ImageCloser. + Close() error +} + +// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest +type ManifestUpdateOptions struct { + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. + EmbeddedDockerReference reference.Named + ManifestMIMEType string + // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. + InformationOnly ManifestUpdateInformation +} + +// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here +// only to make writing struct literals possible. 
+type ManifestUpdateInformation struct { + Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) + LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. +} + +// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. +// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported +// for other manifest types. +type ImageInspectInfo struct { + Tag string + Created *time.Time + DockerVersion string + Labels map[string]string + Architecture string + Variant string + Os string + Layers []string + Env []string +} + +// DockerAuthConfig contains authorization information for connecting to a registry. +// The values of Username and Password may be empty when accessing the registry anonymously. +type DockerAuthConfig struct { + Username string + Password string + // IdentityToken can be used as a refresh_token in place of username and + // password to obtain the bearer/access token in oauth2 flow. If identity + // token is set, password should not be set. + // Ref: https://docs.docker.com/registry/spec/auth/oauth/ + IdentityToken string +} + +// OptionalBool is a boolean with an additional undefined value, which is meant +// to be used in the context of user input to distinguish between a +// user-specified value and a default value. +type OptionalBool byte + +const ( + // OptionalBoolUndefined indicates that the OptionalBool hasn't been set. + OptionalBoolUndefined OptionalBool = iota + // OptionalBoolTrue represents the boolean true. + OptionalBoolTrue + // OptionalBoolFalse represents the boolean false. + OptionalBoolFalse +) + +// NewOptionalBool converts the input bool into either OptionalBoolTrue or +// OptionalBoolFalse. The function is meant to avoid boilerplate code for callers. +func NewOptionalBool(b bool) OptionalBool { + o := OptionalBoolFalse + if b { + o = OptionalBoolTrue + } + return o +} + +// ShortNameMode defines the mode of short-name resolution. +// +// The use of unqualified-search registries entails an ambiguity, as it's +// unclear from which registry a given image, referenced by a short name, may +// be pulled. +// +// The ShortNameMode type defines how short names should resolve. +type ShortNameMode int + +const ( + ShortNameModeInvalid ShortNameMode = iota + // Use all configured unqualified-search registries without prompting + // the user. + ShortNameModeDisabled + // If stdout and stdin are a TTY, prompt the user to select a configured + // unqualified-search registry. Otherwise, use all configured + // unqualified-search registries. + // + // Note that if only one unqualified-search registry is set, it will be + // used without prompting. + ShortNameModePermissive + // Always prompt the user to select a configured unqualified-search + // registry. Return an error if stdout or stdin is not a TTY, as + // prompting isn't possible. + // + // Note that if only one unqualified-search registry is set, it will be + // used without prompting. + ShortNameModeEnforcing +) + +// SystemContext allows parameterizing access to implicitly-accessed resources, +// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly +// the same; if in doubt, add a new field. +// It is always OK to pass nil instead of a SystemContext. +type SystemContext struct { + // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/). + // Not used for any of the more specific path overrides available in this struct. + // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it). + // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths . + // and there is no need to worry about the environment.) + // NOTE: This does NOT affect paths starting by $HOME. + RootForImplicitAbsolutePaths string + + // === Global configuration overrides === + // If not "", overrides the system's default path for signature.Policy configuration. + SignaturePolicyPath string + // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) + RegistriesDirPath string + // Path to the system-wide registries configuration file + SystemRegistriesConfPath string + // Path to the system-wide registries configuration directory + SystemRegistriesConfDirPath string + // Path to the user-specific short-names configuration file + UserShortNameAliasConfPath string + // If set, short-name resolution in pkg/shortnames must follow the specified mode + ShortNameMode *ShortNameMode + // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and + // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce + // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this + // specific context. + PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool + // If not "", overrides the default path for the authentication file, but only new format files + AuthFilePath string + // if not "", overrides the default path for the authentication file, but with the legacy format; + // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir; + // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount) + // in locations other than the home dir; openshift components should then set this field in those cases; + // this field is ignored if `AuthFilePath` is set (we favor the newer format); + // only reading of this data is supported; + LegacyFormatAuthFilePath string + // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. + ArchitectureChoice string + // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. + OSChoice string + // If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match. + VariantChoice string + // If not "", overrides the system's default directory containing a blob info cache. + BlobInfoCacheDir string + // Additional tags when creating or copying a docker-archive. 
+ DockerArchiveAdditionalTags []reference.NamedTagged + // If not "", overrides the temporary directory to use for storing big files + BigFilesTemporaryDir string + + // === OCI.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when downloading OCI image layers. + OCICertPath string + // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + OCIInsecureSkipTLSVerify bool + // If not "", use a shared directory for storing blobs rather than within OCI layouts + OCISharedBlobDirPath string + // Allow uncompressed layers when handling OCI image layers + OCIAcceptUncompressedLayers bool + + // === docker.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a container registry. + DockerCertPath string + // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. + // Ignored if DockerCertPath is non-empty. + DockerPerHostCertDirPath string + // Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerInsecureSkipTLSVerify OptionalBool + // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials + // Ignored if DockerBearerRegistryToken is non-empty. + DockerAuthConfig *DockerAuthConfig + // if not "", the library uses this registry token to authenticate to the registry + DockerBearerRegistryToken string + // if not "", a User-Agent header is added to each request when contacting a registry. + DockerRegistryUserAgent string + // if true, a V1 ping attempt isn't done to give users a better error. Default is false. + // Note that this field is used mainly to integrate containers/image into projectatomic/docker + // in order not to break any existing Docker integration tests. + DockerDisableV1Ping bool + // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list + DockerDisableDestSchema1MIMETypes bool + // If true, the physical pull source of docker transport images is logged at the info level + DockerLogMirrorChoice bool + // Directory to use for OSTree temporary files + OSTreeTmpDirPath string + // If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry. + // Note that this requires writing blobs to temporary files, and takes more time than the default behavior, + // when the digest for a blob is unknown. + DockerRegistryPushPrecomputeDigests bool + + // === docker/daemon.Transport overrides === + // A directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker daemon. + DockerDaemonCertPath string + // The hostname or IP to the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. + DockerDaemonHost string + // Used to skip TLS verification, off by default. To take effect DockerDaemonCertPath needs to be specified as well.
+ DockerDaemonInsecureSkipTLSVerify bool + + // === dir.Transport overrides === + // DirForceCompress compresses the image layers if set to true + DirForceCompress bool + // DirForceDecompress decompresses the image layers if set to true + DirForceDecompress bool + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int +} + +// ProgressEvent is the type of events a progress reader can produce +// Warning: new event types may be added any time. +type ProgressEvent uint + +const ( + // ProgressEventNewArtifact will be fired on progress reader setup + ProgressEventNewArtifact ProgressEvent = iota + + // ProgressEventRead indicates that the artifact download is currently in + // progress + ProgressEventRead + + // ProgressEventDone is fired when the data transfer has been finished for + // the specific artifact + ProgressEventDone + + // ProgressEventSkipped is fired when the artifact has been skipped because + // it's already available at the destination + ProgressEventSkipped +) + +// ProgressProperties is used to pass information from the copy code to a monitor which +// can use the real-time information to produce output or react to changes. +type ProgressProperties struct { + // The event indicating what happened + Event ProgressEvent + + // The artifact which has been updated in this interval + Artifact BlobInfo + + // The currently downloaded size in bytes + // Increases from 0 to the final Artifact size + Offset uint64 + + // The additional offset which has been downloaded inside the last update + // interval. Will be reset after each ProgressEventRead event. + OffsetUpdate uint64 +} diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go new file mode 100644 index 00000000000..641e757f03d --- /dev/null +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -0,0 +1,18 @@ +package version + +import "fmt" + +const ( + // VersionMajor is for API-incompatible changes + VersionMajor = 5 + // VersionMinor is for functionality added in a backwards-compatible manner + VersionMinor = 19 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 3 + + // VersionDev indicates a development branch; releases use the empty string. + VersionDev = "" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/containers/libtrust/CONTRIBUTING.md b/vendor/github.com/containers/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000000..05be0f8ab3d --- /dev/null +++ b/vendor/github.com/containers/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking!
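The SystemContext and OptionalBool types above are the main knobs a caller tunes before handing work to the library. The following minimal sketch is illustrative only and not part of this patch; it assumes the upstream import path github.com/containers/image/v5/types and uses a hypothetical auth-file path:

package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{
		// Pin the target platform instead of relying on the runtime's GOOS/GOARCH.
		OSChoice:           "linux",
		ArchitectureChoice: "amd64",
		// Hypothetical credentials file in the newer format.
		AuthFilePath: "/run/containers/auth.json",
		// Tri-state flag: a zero value stays OptionalBoolUndefined, which lets
		// the library apply its own default; here it is set explicitly.
		DockerInsecureSkipTLSVerify: types.NewOptionalBool(true),
	}

	if sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue {
		fmt.Println("registry TLS verification disabled")
	}
}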
diff --git a/vendor/github.com/containers/libtrust/LICENSE b/vendor/github.com/containers/libtrust/LICENSE new file mode 100644 index 00000000000..27448585ad4 --- /dev/null +++ b/vendor/github.com/containers/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/libtrust/MAINTAINERS b/vendor/github.com/containers/libtrust/MAINTAINERS new file mode 100644 index 00000000000..9768175feb4 --- /dev/null +++ b/vendor/github.com/containers/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/containers/libtrust/README.md b/vendor/github.com/containers/libtrust/README.md new file mode 100644 index 00000000000..dcffb31ae4a --- /dev/null +++ b/vendor/github.com/containers/libtrust/README.md @@ -0,0 +1,22 @@ +# libtrust + +> **WARNING** this library is no longer actively developed, and will be integrated +> into the [docker/distribution](https://www.github.com/docker/distribution) +> repository in the future. + +Libtrust is a library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons. + diff --git a/vendor/github.com/containers/libtrust/certificates.go b/vendor/github.com/containers/libtrust/certificates.go new file mode 100644 index 00000000000..3dcca33cb18 --- /dev/null +++ b/vendor/github.com/containers/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTemplateInfo object.
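+ // Note: the template hardcodes serial number 0 and a fixed validity + // window (one week in the past to ten years out); callers that need + // unique serial numbers or different lifetimes must adjust the returned + // template before signing.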
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/containers/libtrust/doc.go b/vendor/github.com/containers/libtrust/doc.go new file mode 100644 index 00000000000..ec5d2159c11 --- /dev/null +++ b/vendor/github.com/containers/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/containers/libtrust/ec_key.go b/vendor/github.com/containers/libtrust/ec_key.go new file mode 100644 index 00000000000..0ee1b9110a8 --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key.go @@ -0,0 +1,422 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. 
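+// The ID is deterministic: it is derived from the serialized public key material (via +// keyIDFromCryptoKey below), so the same key always yields the same identifier.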
+func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature; +// otherwise the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg.
+ // The given hashID is only a suggestion, and since EC keys only support + // one signature/hash algorithm given the curve name, we disregard it for + // the elliptic curve JWK signature implementation. + r, s, err := k.sign(data, hashID) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + rBytes, sBytes := r.Bytes(), s.Bytes() + octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output + rBuf := make([]byte, octetLength-len(rBytes), octetLength) + sBuf := make([]byte, octetLength-len(sBytes), octetLength) + + rBuf = append(rBuf, rBytes...) + sBuf = append(sBuf, sBytes...) + + signature = append(rBuf, sBuf...) + alg = k.signatureAlgorithm.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PrivateKey for use with other standard library operations. The type +// is either *rsa.PrivateKey or *ecdsa.PrivateKey +func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *ecPrivateKey) toMap() map[string]interface{} { + jwk := k.ecPublicKey.toMap() + + dBytes := k.D.Bytes() + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := k.ecPublicKey.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + // Create a buffer with the necessary zero-padding. + dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. + publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions.
+ */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. +func GenerateECP256PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-256 key: %s", err) + } + + k.curveName = "P-256" + k.signatureAlgorithm = es256 + + return k, nil +} + +// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. +func GenerateECP384PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P384()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-384 key: %s", err) + } + + k.curveName = "P-384" + k.signatureAlgorithm = es384 + + return k, nil +} + +// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521. +func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go new file mode 100644 index 00000000000..d6cdaca3f8d --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go @@ -0,0 +1,23 @@ +// +build !libtrust_openssl + +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + return ecdsa.Sign(rand.Reader, k.PrivateKey, hash) +} diff --git a/vendor/github.com/containers/libtrust/ec_key_openssl.go b/vendor/github.com/containers/libtrust/ec_key_openssl.go new file mode 100644 index 00000000000..4137511f119 --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_openssl.go @@ -0,0 +1,24 @@ +// +build libtrust_openssl + +package libtrust + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hId := k.signatureAlgorithm.HashID() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data: %s", err) + } + + return ecdsa.HashSign(rand.Reader, k.PrivateKey, buf.Bytes(), hId) +} diff --git a/vendor/github.com/containers/libtrust/filter.go b/vendor/github.com/containers/libtrust/filter.go new file mode 100644 index 00000000000..5b2b4fca6fb --- /dev/null +++ b/vendor/github.com/containers/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned.
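+// +// A minimal usage sketch; the key-set file name and host value are illustrative placeholders: +// +// keys, _ := LoadKeySetFile("known-hosts.json") +// matched, err := FilterByHosts(keys, "registry.example.com:443", false) +// +// On success, matched holds only the keys whose "hosts" field covers the given address.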
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern; append the key at most once. + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + break + } + } + } + + return filtered, nil +} diff --git a/vendor/github.com/containers/libtrust/hash.go b/vendor/github.com/containers/libtrust/hash.go new file mode 100644 index 00000000000..a2df787dd99 --- /dev/null +++ b/vendor/github.com/containers/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Register SHA224 and SHA256 + _ "crypto/sha512" // Register SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/vendor/github.com/containers/libtrust/jsonsign.go b/vendor/github.com/containers/libtrust/jsonsign.go new file mode 100644 index 00000000000..cb2ca9a7690 --- /dev/null +++ b/vendor/github.com/containers/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
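+// +// An illustrative call, assuming the caller supplies key and chain and that chain[0] certifies key.PublicKey(): +// +// err := js.SignWithChain(key, chain)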
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", "   ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a JSON Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the caller's responsibility to ensure uniqueness of the +// provided signatures.
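+// +// A minimal sketch; the JSON content and privateKey are illustrative: +// +// js, err := NewJSONSignature([]byte("{\"name\": \"example\"}")) +// if err == nil { +// err = js.Sign(privateKey) +// }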
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
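+// +// For example, once the object has been signed, an embedded-signature document can be produced with (the signature key name is the caller's choice): +// +// pretty, err := js.PrettySignature("signatures") +// +// ParsePrettySignature with the same signature key reverses this step.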
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/vendor/github.com/containers/libtrust/key.go b/vendor/github.com/containers/libtrust/key.go new file mode 100644 index 00000000000..73642db2a8b --- /dev/null +++ b/vendor/github.com/containers/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error + // CryptoPublicKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The type + // is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPublicKey() crypto.PublicKey + // These public keys can be serialized to the standard JSON encoding for + // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web + // Algorithms. + MarshalJSON() ([]byte, error) + // These keys can also be serialized to the standard PEM encoding. + PEMBlock() (*pem.Block, error) + // The string representation of a key is its key type and ID. + String() string + AddExtendedField(string, interface{}) + GetExtendedField(string) interface{} +} + +// PrivateKey is a generic interface for a Private Key. +type PrivateKey interface { + // A PrivateKey contains all fields and methods of a PublicKey of the + // same type. The MarshalJSON method also outputs the private key as a + // JSON Web Key, and the PEMBlock method outputs the private key as a + // PEM block. + PublicKey + // PublicKey returns the PublicKey associated with this PrivateKey. + PublicKey() PublicKey + // Sign signs the data read from the io.Reader using a signature algorithm + // supported by the private key. If the specified hashing algorithm is + // supported by this key, that hash function is used to generate the + // signature otherwise the default hashing algorithm for this key is + // used. Returns the signature and identifier of the algorithm used. + Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) + // CryptoPrivateKey returns the internal object which can be used as a + // crypto.PrivateKey for use with other standard library operations. The + // type is either *rsa.PrivateKey or *ecdsa.PrivateKey + CryptoPrivateKey() crypto.PrivateKey +} + +// FromCryptoPublicKey returns a libtrust PublicKey representation of the given +// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { + switch cryptoPublicKey := cryptoPublicKey.(type) { + case *ecdsa.PublicKey: + return fromECPublicKey(cryptoPublicKey) + case *rsa.PublicKey: + return fromRSAPublicKey(cryptoPublicKey), nil + default: + return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) + } +} + +// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given +// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { + switch cryptoPrivateKey := cryptoPrivateKey.(type) { + case *ecdsa.PrivateKey: + return fromECPrivateKey(cryptoPrivateKey) + case *rsa.PrivateKey: + return fromRSAPrivateKey(cryptoPrivateKey), nil + default: + return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) + } +} + +// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust +// PublicKey or an error if there is a problem with the encoding.
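+// +// Typical use, where pemBytes holds PEM data already read by the caller: +// +// pubKey, err := UnmarshalPublicKeyPEM(pemBytes)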
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/containers/libtrust/key_files.go b/vendor/github.com/containers/libtrust/key_files.go new file mode 100644 index 00000000000..c526de5455b --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
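+// +// For example (the file name is an illustrative placeholder): +// +// pubKey, err := LoadPublicKeyFile("trusted-key.pem") +// +// A ".json" or ".jwk" suffix would select JWK decoding instead.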
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+ return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", "   ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. + file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) + } + defer file.Close() + + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode trusted key: %s", err) + } + + _, err = file.Write(pem.EncodeToMemory(pemBlock)) + if err != nil { + return fmt.Errorf("unable to write trusted keys file: %s", err) + } + + return nil +} diff --git a/vendor/github.com/containers/libtrust/key_manager.go b/vendor/github.com/containers/libtrust/key_manager.go new file mode 100644 index 00000000000..9a98ae3574f --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key PrivateKey + clientFile string + clientDir string + + clientLock sync.RWMutex + clients []PublicKey + + configLock sync.Mutex + configs []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files, +// managed by the given private key.
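+// +// An illustrative construction; the two paths are placeholders chosen for this sketch: +// +// m, err := NewClientKeyManager(trustKey, "/etc/trust/clients.json", "/etc/trust/clients.d") +// if err == nil { +// err = m.RegisterTLSConfig(serverTLSConfig) +// }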
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/vendor/github.com/containers/libtrust/rsa_key.go b/vendor/github.com/containers/libtrust/rsa_key.go new file mode 100644 index 00000000000..dac4cacf20e --- /dev/null +++ b/vendor/github.com/containers/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by +// this key, that hash function is used to generate the signature otherwise +// the default hashing algorithm for this key is used. Returns the signature +// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", +// "RS512". +func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) + hasher := sigAlg.HashID().New() + + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + alg = sigAlg.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PrivateKey for use with other standard library operations. The type +// is either *rsa.PrivateKey or *ecdsa.PrivateKey +func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *rsaPrivateKey) toMap() map[string]interface{} { + k.Precompute() // Make sure the precomputed values are stored. + jwk := k.rsaPublicKey.toMap() + + jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) + jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) + jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) + jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) + jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) + jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) + + otherPrimes := k.Primes[2:] + + if len(otherPrimes) > 0 { + otherPrimesInfo := make([]interface{}, len(otherPrimes)) + for i, r := range otherPrimes { + otherPrimeInfo := make(map[string]string, 3) + otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) + crtVal := k.Precomputed.CRTValues[i] + otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) + otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) + otherPrimesInfo[i] = otherPrimeInfo + } + jwk["oth"] = otherPrimesInfo + } + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // we choose not to accept it without the extra + // fields. Only the 'oth' field will be optional (for multi-prime keys).
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + // Take a pointer into crtValues so the assignments below are retained. + crtValue := &crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/util.go b/vendor/github.com/containers/libtrust/util.go new file mode 100644 index 00000000000..a5a101d3f11 --- /dev/null +++ b/vendor/github.com/containers/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
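+//
+// For example (illustrative call only; the URL, paths, and server name are
+// placeholders, not values taken from this patch):
+//
+//	tlsConfig, err := NewIdentityAuthTLSClientConfig(
+//		"tcp://docker.example.com:2376", true, "/root/.docker/trust", "docker.example.com")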
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Strip leading zero octets, but always keep at least the final octet.
+	for i = 0; i < 3; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	if byteLen > 4 {
+		return 0, fmt.Errorf("invalid public exponent: %d octets is more than the expected maximum of 4", byteLen)
+	}
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Fail on non-encodable header types rather than silently dropping them.
+				return nil, fmt.Errorf("unable to encode PEM header %q: non-encodable []string value", k)
+			}
+		default:
+			// Fail on non-encodable header types rather than silently dropping them.
+			return nil, fmt.Errorf("unable to encode PEM header %q: non-encodable type", k)
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml
new file mode 100644
index 00000000000..e4dd4a4021c
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.travis.yml
@@ -0,0 +1,29 @@
+dist: bionic
+language: go
+
+os:
+- linux
+
+go:
+  - "1.13.x"
+  - "1.16.x"
+
+matrix:
+  include:
+    - os: linux
+
+addons:
+  apt:
+    packages:
+      - gnutls-bin
+      - softhsm2
+
+go_import_path: github.com/containers/ocicrypt
+
+install:
+  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
+
+script:
+  - make
+  - make check
+  - make test
diff --git a/vendor/github.com/containers/ocicrypt/ADOPTERS.md b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
new file mode 100644
index 00000000000..fa4b03bb881
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
@@ -0,0 +1,10 @@
+Below is a list of adopters of the `ocicrypt` library, and of projects that support use of OCI encrypted images:
+- [skopeo](https://github.com/containers/skopeo)
+- 
[buildah](https://github.com/containers/buildah)
+- [containerd](https://github.com/containerd/imgcrypt)
+- [nerdctl](https://github.com/containerd/nerdctl)
+- [distribution](https://github.com/distribution/distribution)
+
+Below is the list of projects that are in the process of adopting support:
+- [quay](https://github.com/quay/quay)
+- [kata-containers](https://github.com/kata-containers/kata-containers)
diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
new file mode 100644
index 00000000000..5131b5a371e
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The OCIcrypt Library Project Community Code of Conduct
+
+The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/ocicrypt/LICENSE b/vendor/github.com/containers/ocicrypt/LICENSE
new file mode 100644
index 00000000000..95356353060
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/LICENSE
@@ -0,0 +1,189 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS new file mode 100644 index 00000000000..af38d03bffb --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS @@ -0,0 +1,6 @@ +# ocicrypt maintainers +# +# Github ID, Name, Email Address +lumjjb, Brandon Lum, lumjjb@gmail.com +stefanberger, Stefan Berger, stefanb@linux.ibm.com +arronwy, Arron Wang, arron.wang@intel.com diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile new file mode 100644 index 00000000000..dc9d9853754 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/Makefile @@ -0,0 +1,34 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: check build decoder generate-protobuf + +all: build + +FORCE: + +check: + golangci-lint run + +build: vendor + go build ./... + +vendor: + go mod tidy + +test: + go test ./... 
-test.v
+
+generate-protobuf:
+	protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider
diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md
new file mode 100644
index 00000000000..b69d14e3b81
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/README.md
@@ -0,0 +1,50 @@
+# OCIcrypt Library
+
+The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as to provide a consistent implementation of image encryption across container runtimes and build tools.
+
+Consumers of OCIcrypt:
+
+- [containerd/imgcrypt](https://github.com/containerd/imgcrypt)
+- [cri-o](https://github.com/cri-o/cri-o)
+- [skopeo](https://github.com/containers/skopeo)
+
+
+## Usage
+
+There are various levels of usage for this library. The main consumers are runtime/build tools; a more specific use is the ability to extend its cryptographic functionality.
+
+### Runtime/Build tool usage
+
+The general interface a runtime/build tool would use is to perform encryption or decryption of layers:
+
+```
+package "github.com/containers/ocicrypt"
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error)
+func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error)
+```
+
+The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. We note that because annotations and other fields of the layer descriptor are set through various means in different runtimes/build tools, it is the responsibility of the caller to ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.).
+
+
+### Crypto Agility and Extensibility
+
+The implementations of both the symmetric and asymmetric encryption used in this library are behind two main interfaces, which users can extend if need be. These are in the following packages:
+- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
+- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping
+
+We note that adding interfaces here that go outside the OCI spec is risky and not recommended, except for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.
+
+
+#### Keyprovider interface
+
+As part of the keywrap interface, there is a [keyprovider](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md) implementation that allows one to call out to a binary or service.
+
+
+## Security Issues
+
+We consider security issues related to this library critical. Please report any security related issues by emailing the maintainers in the [MAINTAINERS](MAINTAINERS) file.
+
+
+## Ocicrypt Pkcs11 Support
+
+Ocicrypt Pkcs11 support is currently experimental. For more details, please refer to [this document](docs/pkcs11.md).
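+
+As an illustrative sketch of the `EncryptLayer` flow described under Usage above
+(not part of upstream ocicrypt; `pubKeys`, `plainLayer`, and `desc` are assumed
+to be supplied by the caller, and error handling is abbreviated):
+
+```
+cc, err := config.EncryptWithJwe(pubKeys) // pubKeys: JWE public keys as [][]byte
+if err != nil {
+	return err
+}
+encReader, finalizer, err := ocicrypt.EncryptLayer(cc.EncryptConfig, plainLayer, desc)
+if err != nil {
+	return err
+}
+// Stream encReader into the blob store, then call the finalizer to obtain the
+// per-layer annotations that the caller must attach to the descriptor.
+annotations, err := finalizer()
+```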
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md
new file mode 100644
index 00000000000..30124c89fd7
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the OCIcrypt Library Project
+
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
new file mode 100644
index 00000000000..da403d95dad
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -0,0 +1,160 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"io"
+
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// LayerCipherType is the ciphertype as specified in the layer metadata
+type LayerCipherType string
+
+// TODO: Should be obtained from OCI spec once included
+const (
+	AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
+)
+
+// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is sensitive and should not be in plaintext
+type PrivateLayerBlockCipherOptions struct {
+	// SymmetricKey represents the symmetric key used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	SymmetricKey []byte `json:"symkey"`
+
+	// Digest is the digest of the original data for verification.
+	// This is NOT populated by Encrypt/Decrypt calls
+	Digest digest.Digest `json:"digest"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is public and can be deduplicated in plaintext across multiple
+// recipients
+type PublicLayerBlockCipherOptions struct {
+	// CipherType denotes the cipher type according to the list of OCI supported
+	// cipher types.
+	CipherType LayerCipherType `json:"cipher"`
+
+	// Hmac contains the hmac string to help verify encryption
+	Hmac []byte `json:"hmac"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
+// required to encrypt/decrypt an image
+type LayerBlockCipherOptions struct {
+	Public  PublicLayerBlockCipherOptions
+	Private PrivateLayerBlockCipherOptions
+}
+
+// LayerBlockCipher returns a provider for encrypt/decrypt functionality
+// for handling the layer data for a specific algorithm
+type LayerBlockCipher interface {
+	// GenerateKey creates a symmetric key
+	GenerateKey() ([]byte, error)
+	// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+	Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
+	// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+	Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
+}
+
+// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
+type LayerBlockCipherHandler struct {
+	cipherMap map[LayerCipherType]LayerBlockCipher
+}
+
+// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
+type Finalizer func() (LayerBlockCipherOptions, error)
+
+// GetOpt returns the value of the cipher option and if the option exists
+func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
+	if v, ok := lbco.Public.CipherOptions[key]; ok {
+		return v, ok
+	} else if v, ok := lbco.Private.CipherOptions[key]; ok {
+		return v, ok
+	} else {
+		return nil, false
+	}
+}
+
+func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
+	return func() (LayerBlockCipherOptions, error) {
+		lbco, err := fin()
+		if err != nil {
+			return LayerBlockCipherOptions{}, err
+		}
+		lbco.Public.CipherType = typ
+		return lbco, err
+	}
+}
+
+// Encrypt is the handler for the layer encryption routine
+func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
+	if c, ok := h.cipherMap[typ]; ok {
+		sk, err := c.GenerateKey()
+		if err != nil {
+			return nil, nil, err
+		}
+		opt := LayerBlockCipherOptions{
+			Private: PrivateLayerBlockCipherOptions{
+				SymmetricKey: sk,
+			},
+		}
+		encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
+		if err == nil {
+			fin = wrapFinalizerWithType(fin, typ)
+		}
+		return encDataReader, fin, err
+	}
+	return nil, nil, errors.Errorf("unsupported cipher type: %s", typ)
+}
+
+// Decrypt is the handler for the layer decryption routine
+func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	typ := opt.Public.CipherType
+	if typ == "" {
+		return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
+	}
+	if c, ok := h.cipherMap[LayerCipherType(typ)]; ok {
+		return c.Decrypt(encDataReader, opt)
+	}
+	return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ)
+}
+
+// NewLayerBlockCipherHandler returns a new default handler
+func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
+	h := LayerBlockCipherHandler{
+		cipherMap:
map[LayerCipherType]LayerBlockCipher{},
+	}
+
+	var err error
+	h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to set up Cipher AES-256-CTR")
+	}
+
+	return &h, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
new file mode 100644
index 00000000000..095a53e354d
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -0,0 +1,193 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/containers/ocicrypt/utils"
+	"github.com/pkg/errors"
+)
+
+// AESCTRLayerBlockCipher implements the AES CTR stream cipher
+type AESCTRLayerBlockCipher struct {
+	keylen         int // in bytes
+	reader         io.Reader
+	encrypt        bool
+	stream         cipher.Stream
+	err            error
+	hmac           hash.Hash
+	expHmac        []byte
+	doneEncrypting bool
+}
+
+type aesctrcryptor struct {
+	bc *AESCTRLayerBlockCipher
+}
+
+// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher; only a key
+// length of 256 bits is currently supported
+func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
+	if bits != 256 {
+		return nil, errors.New("AES CTR bit count not supported")
+	}
+	return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
+}
+
+func (r *aesctrcryptor) Read(p []byte) (int, error) {
+	var (
+		o int
+	)
+
+	if r.bc.err != nil {
+		return 0, r.bc.err
+	}
+
+	o, err := utils.FillBuffer(r.bc.reader, p)
+	if err != nil {
+		if err == io.EOF {
+			r.bc.err = err
+		} else {
+			return 0, err
+		}
+	}
+
+	if !r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = errors.Wrapf(err, "could not write to hmac")
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Before we return EOF we let the HMAC comparison
+			// provide a verdict
+			if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
+				r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
+				return 0, r.bc.err
+			}
+		}
+	}
+
+	r.bc.stream.XORKeyStream(p[:o], p[:o])
+
+	if r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = errors.Wrapf(err, "could not write to hmac")
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Final data encrypted; Do the 'then-MAC' part
+			r.bc.doneEncrypting = true
+		}
+	}
+
+	return o, r.bc.err
+}
+
+// init initializes an instance
+func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
+	var (
+		err error
+	)
+
+	key := opts.Private.SymmetricKey
+	if len(key) != bc.keylen {
+		return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
+	}
+
+	nonce, ok := opts.GetOpt("nonce")
+	if !ok {
+		nonce = make([]byte, aes.BlockSize)
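+		// No nonce was supplied in the options; draw a fresh random one below.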
+		if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+			return LayerBlockCipherOptions{}, errors.Wrap(err, "unable to generate random nonce")
+		}
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return LayerBlockCipherOptions{}, errors.Wrap(err, "aes.NewCipher failed")
+	}
+
+	bc.reader = reader
+	bc.encrypt = encrypt
+	bc.stream = cipher.NewCTR(block, nonce)
+	bc.err = nil
+	bc.hmac = hmac.New(sha256.New, key)
+	bc.expHmac = opts.Public.Hmac
+	bc.doneEncrypting = false
+
+	if !encrypt && len(bc.expHmac) == 0 {
+		return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
+	}
+
+	lbco := LayerBlockCipherOptions{
+		Private: PrivateLayerBlockCipherOptions{
+			SymmetricKey: key,
+			CipherOptions: map[string][]byte{
+				"nonce": nonce,
+			},
+		},
+	}
+
+	return lbco, nil
+}
+
+// GenerateKey creates a symmetric key
+func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
+	key := make([]byte, bc.keylen)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
+	lbco, err := bc.init(true, plainDataReader, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	finalizer := func() (LayerBlockCipherOptions, error) {
+		if !bc.doneEncrypting {
+			return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
+		}
+		if lbco.Public.CipherOptions == nil {
+			lbco.Public.CipherOptions = map[string][]byte{}
+		}
+		lbco.Public.Hmac = bc.hmac.Sum(nil)
+		return lbco, nil
+	}
+	return &aesctrcryptor{bc}, finalizer, nil
+}
+
+// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	lbco, err := bc.init(false, encDataReader, opt)
+	if err != nil {
+		return nil, LayerBlockCipherOptions{}, err
+	}
+
+	return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/config.go b/vendor/github.com/containers/ocicrypt/config/config.go
new file mode 100644
index 00000000000..d960766ebee
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/config.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+// EncryptConfig is the container image PGP encryption configuration holding
+// the identifiers of those that will be able to decrypt the container and
+// the PGP public keyring file data that contains their public keys.
+type EncryptConfig struct {
+	// map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
+	Parameters map[string][][]byte
+
+	DecryptConfig DecryptConfig
+}
+
+// DecryptConfig wraps the Parameters map that holds the decryption key
+type DecryptConfig struct {
+	// map holding 'privkeys', 'x509s', 'gpg-privatekeys'
+	Parameters map[string][][]byte
+}
+
+// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
+// be passed through functions that share much code for encryption and decryption
+type CryptoConfig struct {
+	EncryptConfig *EncryptConfig
+	DecryptConfig *DecryptConfig
+}
+
+// InitDecryption initializes a CryptoConfig object with parameters used for decryption
+func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparameters,
+		},
+	}
+}
+
+// InitEncryption initializes a CryptoConfig object with parameters used for encryption
+// It also takes dcparameters that may be needed for decryption when adding a recipient
+// to an already encrypted image
+func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: parameters,
+			DecryptConfig: DecryptConfig{
+				Parameters: dcparameters,
+			},
+		},
+	}
+}
+
+// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
+// containing the crypto configuration of all the key bundles
+func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
+	ecparam := map[string][][]byte{}
+	ecdcparam := map[string][][]byte{}
+	dcparam := map[string][][]byte{}
+
+	for _, cc := range ccs {
+		if ec := cc.EncryptConfig; ec != nil {
+			addToMap(ecparam, ec.Parameters)
+			addToMap(ecdcparam, ec.DecryptConfig.Parameters)
+		}
+
+		if dc := cc.DecryptConfig; dc != nil {
+			addToMap(dcparam, dc.Parameters)
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: ecparam,
+			DecryptConfig: DecryptConfig{
+				Parameters: ecdcparam,
+			},
+		},
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparam,
+		},
+	}
+}
+
+// AttachDecryptConfig adds DecryptConfig to the field of EncryptConfig so that
+// the decryption parameters can be used to add recipients to an existing image
+// if the user is able to decrypt it.
+func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
+	if dc != nil {
+		addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
+	}
+}
+
+func addToMap(orig map[string][][]byte, add map[string][][]byte) {
+	for k, v := range add {
+		if ov, ok := orig[k]; ok {
+			orig[k] = append(ov, v...)
+		} else {
+			orig[k] = v
+		}
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
new file mode 100644
index 00000000000..a789d052dc5
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -0,0 +1,245 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package config + +import ( + "github.com/containers/ocicrypt/crypto/pkcs11" + "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys +func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "pubkeys": pubKeys, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs +func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + + ep := map[string][][]byte{ + "x509s": x509s, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters +func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "gpg-recipients": gpgRecipients, + "gpg-pubkeyringfile": {gpgPubRingFile}, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters +func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{} + + if len(pkcs11Yamls) > 0 { + if pkcs11Config == nil { + return CryptoConfig{}, errors.New("pkcs11Config must not be nil") + } + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") + } + + dc = DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-config": {p11confYaml}, + }, + } + ep["pkcs11-yamls"] = pkcs11Yamls + } + if len(pkcs11Pubkeys) > 0 { + ep["pkcs11-pubkeys"] = pkcs11Pubkeys + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters +func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := make(map[string][][]byte) + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled")) + } + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters +func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dp := make(map[string][][]byte) + ep := map[string][][]byte{} + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled")) + } + } + dc := DecryptConfig{ + Parameters: dp, + } + return 
CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys +func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) { + if len(privKeys) != len(privKeysPasswords) { + return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords") + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "privkeys": privKeys, + "privkeys-passwords": privKeysPasswords, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs +func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "x509s": x509s, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys +func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "gpg-privatekeys": gpgPrivKeys, + "gpg-privatekeys-passwords": gpgPrivKeysPwds, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files +func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-yamls": pkcs11Yamls, + "pkcs11-config": {p11confYaml}, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go new file mode 100644 index 00000000000..b454b371631 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go @@ -0,0 +1,81 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// Command describes the structure of a command; it consists of a path and args,
+// where path defines the location of the binary executable and args are passed
+// on to the binary executable
+type Command struct {
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"args,omitempty"`
+}
+
+// KeyProviderAttrs describes the structure of a key provider; it defines how
+// the key provider is invoked
+type KeyProviderAttrs struct {
+	Command *Command `json:"cmd,omitempty"`
+	Grpc    string   `json:"grpc,omitempty"`
+}
+
+// OcicryptConfig represents the format of an ocicrypt_provider.conf config file
+type OcicryptConfig struct {
+	KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"`
+}
+
+const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = json.Unmarshal(data, ic)
+	return ic, err
+}
+
+// GetConfiguration tries to read the configuration file at the location given by
+// ${OCICRYPT_KEYPROVIDER_CONFIG} (e.g. "/etc/ocicrypt_keyprovider.yaml").
+// If no configuration file could be found or read, a null pointer is returned
+func GetConfiguration() (*OcicryptConfig, error) {
+	var ic *OcicryptConfig
+	var err error
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		ic, err = parseConfigFile(filename)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error while parsing keyprovider config file")
+		}
+	} else {
+		return nil, nil
+	}
+	return ic, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
new file mode 100644
index 00000000000..76be3413822
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
@@ -0,0 +1,124 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
+)
+
+// OcicryptConfig represents the format of an imgcrypt.conf config file
+type OcicryptConfig struct {
+	Pkcs11Config pkcs11.Pkcs11Config `yaml:"pkcs11"`
+}
+
+const CONFIGFILE = "ocicrypt.conf"
+const ENVVARNAME = "OCICRYPT_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+// A config file may look like this:
+// module-directories:
+// - /usr/lib64/pkcs11/
+// - /usr/lib/pkcs11/
+// allowed-module-paths:
+// - /usr/lib64/pkcs11/
+// - /usr/lib/pkcs11/
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = yaml.Unmarshal(data, ic)
+	return ic, err
+}
+
+// getConfiguration tries to read the configuration file at the following locations
+// 1) ${OCICRYPT_CONFIG} == "internal": use internal default allow-all policy
+// 2) ${OCICRYPT_CONFIG}
+// 3) ${XDG_CONFIG_HOME}/ocicrypt-pkcs11.conf
+// 4) ${HOME}/.config/ocicrypt-pkcs11.conf
+// 5) /etc/ocicrypt-pkcs11.conf
+// If no configuration file could be found or read, a null pointer is returned
+func getConfiguration() (*OcicryptConfig, error) {
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		if filename == "internal" {
+			return getDefaultCryptoConfigOpts()
+		}
+		ic, err := parseConfigFile(filename)
+		if err != nil || ic != nil {
+			return ic, err
+		}
+	}
+	envvar := os.Getenv("XDG_CONFIG_HOME")
+	if len(envvar) > 0 {
+		ic, err := parseConfigFile(path.Join(envvar, CONFIGFILE))
+		if err != nil || ic != nil {
+			return ic, err
+		}
+	}
+	envvar = os.Getenv("HOME")
+	if len(envvar) > 0 {
+		ic, err := parseConfigFile(path.Join(envvar, ".config", CONFIGFILE))
+		if err != nil || ic != nil {
+			return ic, err
+		}
+	}
+	return parseConfigFile(path.Join("etc", CONFIGFILE))
+}
+
+// getDefaultCryptoConfigOpts returns the default crypto config opts needed for pkcs11 module access
+func getDefaultCryptoConfigOpts() (*OcicryptConfig, error) {
+	mdyaml := pkcs11.GetDefaultModuleDirectoriesYaml("")
+	config := fmt.Sprintf("module-directories:\n"+
+		"%s"+
+		"allowed-module-paths:\n"+
+		"%s", mdyaml, mdyaml)
+	p11conf, err := pkcs11.ParsePkcs11ConfigFile([]byte(config))
+	return &OcicryptConfig{
+		Pkcs11Config: *p11conf,
+	}, err
+}
+
+// GetUserPkcs11Config gets the user's Pkcs11Config either from a configuration file or,
+// if none is found, returns the default one
+func GetUserPkcs11Config() (*pkcs11.Pkcs11Config, error) {
+	fmt.Print("Note: pkcs11 support is currently experimental\n")
+	ic, err := getConfiguration()
+	if err != nil {
+		return &pkcs11.Pkcs11Config{}, err
+	}
+	if ic == nil {
+		return &pkcs11.Pkcs11Config{}, errors.New("No ocicrypt config file was found")
+	}
+	return &ic.Pkcs11Config, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
new file mode 100644
index 00000000000..7fcd2e3af68
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -0,0 +1,134 @@
+/*
+   Copyright The ocicrypt Authors.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+	"github.com/pkg/errors"
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+	"gopkg.in/yaml.v2"
+)
+
+// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
+// It also carries pkcs11 module related environment variables that are transferred to the
+// Pkcs11URI object and activated when the pkcs11 module is used.
+type Pkcs11KeyFile struct {
+	Pkcs11 struct {
+		Uri string `yaml:"uri"`
+	} `yaml:"pkcs11"`
+	Module struct {
+		Env map[string]string `yaml:"env,omitempty"`
+	} `yaml:"module"`
+}
+
+// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object
+type Pkcs11KeyFileObject struct {
+	Uri *pkcs11uri.Pkcs11URI
+}
+
+// ParsePkcs11Uri parses a pkcs11 URI
+func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
+	p11uri := pkcs11uri.New()
+	err := p11uri.Parse(uri)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Could not parse Pkcs11URI from file")
+	}
+	return p11uri, err
+}
+
+// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
+// The file has the following yaml format:
+// pkcs11:
+//  - uri : <pkcs11 uri>
+// An error is returned if the pkcs11 URI is malformed
+func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
+	p11keyfile := Pkcs11KeyFile{}
+
+	err := yaml.Unmarshal([]byte(yamlstr), &p11keyfile)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Could not unmarshal pkcs11 keyfile")
+	}
+
+	p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
+	if err != nil {
+		return nil, err
+	}
+	p11uri.SetEnvMap(p11keyfile.Module.Env)
+
+	return &Pkcs11KeyFileObject{Uri: p11uri}, err
+}
+
+// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key
+func IsPkcs11PrivateKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key
+func IsPkcs11PublicKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// Pkcs11Config describes the layout of a pkcs11 config file
+// The file has the following yaml format:
+// module-directories:
+// - /usr/lib64/pkcs11/
+// allowed-module-paths:
+// - /usr/lib64/pkcs11/libsofthsm2.so
+type Pkcs11Config struct {
+	ModuleDirectories  []string `yaml:"module-directories"`
+	AllowedModulePaths []string `yaml:"allowed-module-paths"`
+}
+
+// GetDefaultModuleDirectories returns module directories covering
+// a variety of Linux distros
+func GetDefaultModuleDirectories() []string {
+	dirs := []string{
+		"/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE
+		"/usr/lib/pkcs11/",   // Fedora,ArchLinux
+		"/usr/local/lib/pkcs11/",
+		"/usr/lib/softhsm/", // Debian,Ubuntu
+	}
+
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/
+	hosttype, ostype, q := getHostAndOsType()
+	if len(hosttype) > 0 {
+		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
+		dirs = append(dirs, dir)
+	}
+	return dirs
+}
+
+// GetDefaultModuleDirectoriesYaml returns the default module directories formatted as a YAML list
+func GetDefaultModuleDirectoriesYaml(indent string) string {
+	res := ""
+
+	for _, dir := range GetDefaultModuleDirectories() {
+		res += indent + "- " + dir + "\n"
+	}
+	return res
+}
+
+// ParsePkcs11ConfigFile parses a pkcs11 config file that influences the module search behavior
+// as well as the set of modules that users are allowed to use
+func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
+
p11conf := Pkcs11Config{} + + err := yaml.Unmarshal([]byte(yamlstr), &p11conf) + if err != nil { + return &p11conf, errors.Wrapf(err, "Could not parse Pkcs11Config") + } + return &p11conf, nil +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go new file mode 100644 index 00000000000..7d80f5f844b --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -0,0 +1,485 @@ +// +build cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "hash" + "net/url" + "os" + "strconv" + "strings" + + "github.com/miekg/pkcs11" + "github.com/pkg/errors" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" +) + +var ( + // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed + OAEPLabel = []byte("") + + // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM + OAEPSha1Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA_1, + MGF: pkcs11.CKG_MGF1_SHA1, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } + // OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm + OAEPSha256Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA256, + MGF: pkcs11.CKG_MGF1_SHA256, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } +) + +// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the +// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM). 
+// This function is needed by clients who are using a public key file for pkcs11 encryption
+func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) {
+	var (
+		hashfunc hash.Hash
+		hashalg  string
+	)
+
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		hashfunc = sha1.New()
+		hashalg = "sha1"
+	case "sha256", "":
+		hashfunc = sha256.New()
+		hashalg = "sha256"
+	default:
+		return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+	ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
+	if err != nil {
+		return nil, "", errors.Wrapf(err, "rsa.EncryptOAEP failed")
+	}
+
+	return ciphertext, hashalg, nil
+}
+
+// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI
+// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned
+// For a privateKeyOperation a PIN is required and if none is given, this function will return an error
+func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) {
+	var (
+		pin string
+		err error
+	)
+	if privateKeyOperation {
+		if !p11uri.HasPIN() {
+			return "", "", 0, errors.New("Missing PIN for private key operation")
+		}
+	}
+	// some devices require a PIN to find a *public* key object, others don't
+	pin, _ = p11uri.GetPIN()
+
+	module, err := p11uri.GetModule()
+	if err != nil {
+		return "", "", 0, errors.Wrap(err, "No module available in pkcs11 URI")
+	}
+
+	slotid := int64(-1)
+
+	slot, ok := p11uri.GetPathAttribute("slot-id", false)
+	if ok {
+		slotid, err = strconv.ParseInt(slot, 10, 64)
+		if err != nil {
+			return "", "", 0, errors.Wrap(err, "slot-id is not a valid number")
+		}
+		if slotid < 0 {
+			return "", "", 0, fmt.Errorf("slot-id is a negative number")
+		}
+		if uint64(slotid) > 0xffffffff {
+			return "", "", 0, fmt.Errorf("slot-id is larger than 32 bit")
+		}
+	}
+
+	return pin, module, slotid, nil
+}
+
+// pkcs11UriGetKeyIdAndLabel gets the key id and label by retrieving the values of the 'id'
+// and 'object' attributes
+func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) {
+	keyid, ok2 := p11uri.GetPathAttribute("id", false)
+	label, ok1 := p11uri.GetPathAttribute("object", false)
+	if !ok1 && !ok2 {
+		return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI")
+	}
+	return keyid, label, nil
+}
+
+// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
+func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
+	session, err = p11ctx.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		return 0, errors.Wrapf(err, "OpenSession to slot %d failed", slotid)
+	}
+	if len(pin) > 0 {
+		err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
+		if err != nil {
+			_ = p11ctx.CloseSession(session)
+			return 0, errors.Wrap(err, "Could not login to device")
+		}
+	}
+	return session, nil
+}
+
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
+// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
+// one slot after the other will be attempted and the first one where login succeeds will be used
+func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx,
session pkcs11.SessionHandle, err error) { + pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation) + if err != nil { + return nil, 0, err + } + + p11ctx := pkcs11.New(module) + if p11ctx == nil { + return nil, 0, errors.New("Please check module path, input is: " + module) + } + + err = p11ctx.Initialize() + if err != nil { + p11Err := err.(pkcs11.Error) + if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { + return nil, 0, errors.Wrap(err, "Initialize failed") + } + } + + if slotid >= 0 { + session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) + return p11ctx, session, err + } else { + slots, err := p11ctx.GetSlotList(true) + if err != nil { + return nil, 0, errors.Wrap(err, "GetSlotList failed") + } + + tokenlabel, ok := p11uri.GetPathAttribute("token", false) + if !ok { + return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") + } + + for _, slot := range slots { + ti, err := p11ctx.GetTokenInfo(slot) + if err != nil || ti.Label != tokenlabel { + continue + } + + session, err = pkcs11OpenSession(p11ctx, slot, pin) + if err == nil { + return p11ctx, session, err + } + } + if len(pin) > 0 { + return nil, 0, errors.New("Could not create session to any slot and/or log in") + } + return nil, 0, errors.New("Could not create session to any slot") + } +} + +func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { + _ = ctx.Logout(session) + _ = ctx.CloseSession(session) + _ = ctx.Finalize() + ctx.Destroy() +} + +// findObject finds an object of the given class with the given keyid and/or label +func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) { + msg := "" + + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + } + if len(label) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) + msg = fmt.Sprintf("label '%s'", label) + } + if len(keyid) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid)) + if len(msg) > 0 { + msg += " and " + } + msg += url.PathEscape(keyid) + } + + if err := p11ctx.FindObjectsInit(session, template); err != nil { + return 0, errors.Wrap(err, "FindObjectsInit failed") + } + + obj, _, err := p11ctx.FindObjects(session, 100) + if err != nil { + return 0, errors.Wrap(err, "FindObjects failed") + } + + if err := p11ctx.FindObjectsFinal(session); err != nil { + return 0, errors.Wrap(err, "FindObjectsFinal failed") + } + if len(obj) > 1 { + return 0, errors.Errorf("There are too many (=%d) keys with %s", len(obj), msg) + } else if len(obj) == 1 { + return obj[0], nil + } + + return 0, errors.Errorf("Could not find any object with %s", msg) +} + +// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext +func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) { + oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap()) + if err != nil { + return nil, "", err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false) + if err != nil { + return nil, "", err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri) + if err != nil { + return nil, "", err + } + + p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label) + if err != nil { + return nil, "", err + } + + var hashalg string + + var oaep *pkcs11.OAEPParams + oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG") + 
// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		oaep = OAEPSha1Params
+		hashalg = "sha1"
+	case "sha256", "":
+		oaep = OAEPSha256Params
+		hashalg = "sha256"
+	default:
+		return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+
+	err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey)
+	if err != nil {
+		return nil, "", errors.Wrap(err, "EncryptInit error")
+	}
+
+	ciphertext, err := p11ctx.Encrypt(session, plaintext)
+	if err != nil {
+		return nil, "", errors.Wrap(err, "Encrypt failed")
+	}
+	return ciphertext, hashalg, nil
+}
+
+// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext
+func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) {
+	oldenv, err := setEnvVars(privKeyObj.Uri.GetEnvMap())
+	if err != nil {
+		return nil, err
+	}
+	defer restoreEnv(oldenv)
+
+	p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true)
+	if err != nil {
+		return nil, err
+	}
+	defer pkcs11Logout(p11ctx, session)
+
+	keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri)
+	if err != nil {
+		return nil, err
+	}
+
+	p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label)
+	if err != nil {
+		return nil, err
+	}
+
+	var oaep *pkcs11.OAEPParams
+
+	// An empty string from the Hash in the JSON historically defaults to sha1.
+	switch hashalg {
+	case "sha1", "":
+		oaep = OAEPSha1Params
+	case "sha256":
+		oaep = OAEPSha256Params
+	default:
+		return nil, errors.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg)
+	}
+
+	err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey)
+	if err != nil {
+		return nil, errors.Wrapf(err, "DecryptInit failed")
+	}
+	plaintext, err := p11ctx.Decrypt(session, ciphertext)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Decrypt failed")
+	}
+	return plaintext, err
+}
+
+//
+// The following part deals with the JSON formatted message for multiple pkcs11 recipients
+//
+
+// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations
+type Pkcs11Blob struct {
+	Version    uint              `json:"version"`
+	Recipients []Pkcs11Recipient `json:"recipients"`
+}
+
+// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient
+type Pkcs11Recipient struct {
+	Version uint   `json:"version"`
+	Blob    string `json:"blob"`
+	Hash    string `json:"hash,omitempty"`
+}
+
+// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function
+// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the
+// following format:
+// {
+//   recipients: [  // recipient list
+//     {
+//       "version": 0,
+//       "blob": <base64 encoded ciphertext>,
+//       "hash": <hash algorithm used>
+//     } ,
+//     {
+//       "version": 0,
+//       "blob": <base64 encoded ciphertext>,
+//       "hash": <hash algorithm used>
+//     } ,
+//     [...]
+//   ]
+// }
+func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
+	var (
+		ciphertext []byte
+		err        error
+		pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0}
+		hashalg    string
+	)
+
+	for _, pubKey := range pubKeys {
+		switch pkey := pubKey.(type) {
+		case *rsa.PublicKey:
+			ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data)
+		case *Pkcs11KeyFileObject:
+			ciphertext, hashalg, err = publicEncryptOAEP(pkey, data)
+		default:
+			err = errors.Errorf("Unsupported key object type for pkcs11 public key")
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		recipient := Pkcs11Recipient{
+			Version: 0,
+			Blob:    base64.StdEncoding.EncodeToString(ciphertext),
+			Hash:    hashalg,
+		}
+
+		pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient)
+	}
+	return json.Marshal(&pkcs11blob)
+}
+
+// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key.
+// The input pkcs11blobstr is a string with the following format:
+// {
+//   recipients: [  // recipient list
+//     {
+//       "version": 0,
+//       "blob": <base64 encoded ciphertext>,
+//       "hash": <hash algorithm used>
+//     } ,
+//     {
+//       "version": 0,
+//       "blob": <base64 encoded ciphertext>,
+//       "hash": <hash algorithm used>
+//     } ,
+//     [...]
+//   ]
+// }
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty in case of 'sha1'.
+//
+func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
+	pkcs11blob := Pkcs11Blob{}
+	err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Could not parse Pkcs11Blob")
+	}
+	switch pkcs11blob.Version {
+	case 0:
+		// latest supported version
+	default:
+		return nil, errors.Errorf("Found Pkcs11Blob with version %d but maximum supported version is 0.", pkcs11blob.Version)
+	}
+	// since we do trial and error, collect all encountered errors
+	errs := ""
+
+	for _, recipient := range pkcs11blob.Recipients {
+		switch recipient.Version {
+		case 0:
+			// latest supported version
+		default:
+			return nil, errors.Errorf("Found Pkcs11Recipient with version %d but maximum supported version is 0.", recipient.Version)
+		}
+
+		ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob)
+		if err != nil || len(ciphertext) == 0 {
+			// This should never happen... we skip over decoding issues
+			errs += fmt.Sprintf("Base64 decoding failed: %s\n", err)
+			continue
+		}
+		// try all keys until one works
+		for _, privKeyObj := range privKeyObjs {
+			plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash)
+			if err == nil {
+				return plaintext, nil
+			}
+			if uri, err2 := privKeyObj.Uri.Format(); err2 == nil {
+				errs += fmt.Sprintf("%s : %s\n", uri, err)
+			} else {
+				errs += fmt.Sprintf("%s\n", err)
+			}
+		}
+	}
+
+	return nil, errors.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs)
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
new file mode 100644
index 00000000000..6edf75269f3
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
@@ -0,0 +1,31 @@
+// +build !cgo
+
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "github.com/pkg/errors" +) + +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") +} + +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go new file mode 100644 index 00000000000..306e372d5f4 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -0,0 +1,114 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "os" + "runtime" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + envLock sync.Mutex +) + +// setEnvVars sets the environment variables given in the map and locks the environment from +// modification with the same function; if successful, you *must* call restoreEnv with the return +// value from this function +func setEnvVars(env map[string]string) ([]string, error) { + envLock.Lock() + + if len(env) == 0 { + return nil, nil + } + + oldenv := os.Environ() + + for k, v := range env { + err := os.Setenv(k, v) + if err != nil { + restoreEnv(oldenv) + return nil, errors.Wrapf(err, "Could not set environment variable '%s' to '%s'", k, v) + } + } + + return oldenv, nil +} + +func arrayToMap(elements []string) map[string]string { + o := make(map[string]string) + + for _, element := range elements { + p := strings.SplitN(element, "=", 2) + if len(p) == 2 { + o[p[0]] = p[1] + } + } + + return o +} + +// restoreEnv restores the environment to be exactly as given in the array of strings +// and unlocks the lock +func restoreEnv(envs []string) { + if envs != nil && len(envs) >= 0 { + target := arrayToMap(envs) + curr := arrayToMap(os.Environ()) + + for nc, vc := range curr { + vt, ok := target[nc] + if !ok { + os.Unsetenv(nc) + } else if vc == vt { + delete(target, nc) + } + } + + for nt, vt := range target { + os.Setenv(nt, vt) + } + } + + envLock.Unlock() +} + +func getHostAndOsType() (string, string, string) { + ht := "" + ot := "" + st := "" + switch runtime.GOOS { + case "linux": + ot = "linux" + st = "gnu" + switch runtime.GOARCH { + case "arm": + ht = "arm" + case "arm64": + ht = "aarch64" + case "amd64": + ht = "x86_64" + case "ppc64le": + ht = "powerpc64le" + case "s390x": + ht = "s390x" + } + } + return ht, ot, st +} diff --git a/vendor/github.com/containers/ocicrypt/encryption.go 
b/vendor/github.com/containers/ocicrypt/encryption.go
new file mode 100644
index 00000000000..f5142cc8d06
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/encryption.go
@@ -0,0 +1,350 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
+	"github.com/containers/ocicrypt/keywrap/keyprovider"
+	"io"
+	"strings"
+
+	"github.com/containers/ocicrypt/blockcipher"
+	"github.com/containers/ocicrypt/config"
+	"github.com/containers/ocicrypt/keywrap"
+	"github.com/containers/ocicrypt/keywrap/jwe"
+	"github.com/containers/ocicrypt/keywrap/pgp"
+	"github.com/containers/ocicrypt/keywrap/pkcs11"
+	"github.com/containers/ocicrypt/keywrap/pkcs7"
+	"github.com/opencontainers/go-digest"
+	log "github.com/sirupsen/logrus"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// EncryptLayerFinalizer is a finalizer run to return the annotations to set for
+// the encrypted layer
+type EncryptLayerFinalizer func() (map[string]string, error)
+
+func init() {
+	keyWrappers = make(map[string]keywrap.KeyWrapper)
+	keyWrapperAnnotations = make(map[string]string)
+	RegisterKeyWrapper("pgp", pgp.NewKeyWrapper())
+	RegisterKeyWrapper("jwe", jwe.NewKeyWrapper())
+	RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper())
+	RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper())
+	ic, err := keyproviderconfig.GetConfiguration()
+	if err != nil {
+		log.Error(err)
+	} else if ic != nil {
+		for provider, attrs := range ic.KeyProviderConfig {
+			RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs))
+		}
+	}
+}
+
+var keyWrappers map[string]keywrap.KeyWrapper
+var keyWrapperAnnotations map[string]string
+
+// RegisterKeyWrapper registers a key wrapper under its encryption scheme
+func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) {
+	keyWrappers[scheme] = iface
+	keyWrapperAnnotations[iface.GetAnnotationID()] = scheme
+}
+
+// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe)
+func GetKeyWrapper(scheme string) keywrap.KeyWrapper {
+	return keyWrappers[scheme]
+}
+
+// GetWrappedKeysMap returns a map of wrapped keys as values, keyed by the
+// encryption scheme(s) found in the descriptor's annotations
+func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string {
+	wrappedKeysMap := make(map[string]string)
+
+	for annotationsID, scheme := range keyWrapperAnnotations {
+		if annotation, ok := desc.Annotations[annotationsID]; ok {
+			wrappedKeysMap[scheme] = annotation
+		}
+	}
+	return wrappedKeysMap
+}
+
+// EncryptLayer encrypts the layer by running one encryptor after the other
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) {
+	var (
+		encLayerReader io.Reader
+		err            error
+		encrypted      bool
+		bcFin
blockcipher.Finalizer + privOptsData []byte + pubOptsData []byte + ) + + if ec == nil { + return nil, nil, errors.New("EncryptConfig must not be nil") + } + + for annotationsID := range keyWrapperAnnotations { + annotation := desc.Annotations[annotationsID] + if annotation != "" { + privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc) + if err != nil { + return nil, nil, err + } + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, nil, err + } + // already encrypted! + encrypted = true + } + } + + if !encrypted { + encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR) + if err != nil { + return nil, nil, err + } + } + + encLayerFinalizer := func() (map[string]string, error) { + // If layer was already encrypted, bcFin should be nil, use existing optsData + if bcFin != nil { + opts, err := bcFin() + if err != nil { + return nil, err + } + privOptsData, err = json.Marshal(opts.Private) + if err != nil { + return nil, errors.Wrapf(err, "could not JSON marshal opts") + } + pubOptsData, err = json.Marshal(opts.Public) + if err != nil { + return nil, errors.Wrapf(err, "could not JSON marshal opts") + } + } + + newAnnotations := make(map[string]string) + keysWrapped := false + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotations := desc.Annotations[annotationsID] + keywrapper := GetKeyWrapper(scheme) + b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData) + if err != nil { + return nil, err + } + if b64Annotations != "" { + keysWrapped = true + newAnnotations[annotationsID] = b64Annotations + } + } + + if !keysWrapped { + return nil, errors.New("no wrapped keys produced by encryption") + } + newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) + + if len(newAnnotations) == 0 { + return nil, errors.New("no encryptor found to handle encryption") + } + + return newAnnotations, err + } + + // if nothing was encrypted, we just return encLayer = nil + return encLayerReader, encLayerFinalizer, err + +} + +// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the +// annotation data +func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) { + newAnnotation, err := keywrapper.WrapKeys(ec, optsData) + if err != nil || len(newAnnotation) == 0 { + return b64Annotations, err + } + b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation) + if b64Annotations == "" { + return b64newAnnotation, nil + } + return b64Annotations + "," + b64newAnnotation, nil +} + +// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it +// can apply the provided private key +// If unwrapOnly is set we will only try to decrypt the layer encryption key and return +func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) { + if dc == nil { + return nil, "", errors.New("DecryptConfig must not be nil") + } + privOptsData, err := decryptLayerKeyOptsData(dc, desc) + if err != nil || unwrapOnly { + return nil, "", err + } + + var pubOptsData []byte + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, "", err + } + + return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData) +} + +func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) { + 
privKeyGiven := false
+	errs := ""
+	for annotationsID, scheme := range keyWrapperAnnotations {
+		b64Annotation := desc.Annotations[annotationsID]
+		if b64Annotation != "" {
+			keywrapper := GetKeyWrapper(scheme)
+
+			if keywrapper.NoPossibleKeys(dc.Parameters) {
+				continue
+			}
+
+			if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 {
+				privKeyGiven = true
+			}
+			optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation)
+			if err != nil {
+				// try next keywrap.KeyWrapper
+				errs += fmt.Sprintf("%s\n", err)
+				continue
+			}
+			if optsData == nil {
+				// try next keywrap.KeyWrapper
+				continue
+			}
+			return optsData, nil
+		}
+	}
+	if !privKeyGiven {
+		return nil, errors.New("missing private key needed for decryption")
+	}
+	return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs)
+}
+
+func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) {
+	pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"]
+	if pubOptsString == "" {
+		return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{})
+	}
+	return base64.StdEncoding.DecodeString(pubOptsString)
+}
+
+// preUnwrapKey decodes the comma-separated base64 strings and calls the Unwrap function
+// of the given keywrapper with it and returns the result in case the Unwrap function
+// does not return an error. If all attempts fail, an error is returned.
+func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) {
+	if b64Annotations == "" {
+		return nil, nil
+	}
+	errs := ""
+	for _, b64Annotation := range strings.Split(b64Annotations, ",") {
+		annotation, err := base64.StdEncoding.DecodeString(b64Annotation)
+		if err != nil {
+			return nil, errors.New("could not base64 decode the annotation")
+		}
+		optsData, err := keywrapper.UnwrapKey(dc, annotation)
+		if err != nil {
+			errs += fmt.Sprintf("- %s\n", err)
+			continue
+		}
+		return optsData, nil
+	}
+	return nil, errors.Errorf("no suitable key found for decrypting layer key:\n%s", errs)
+}
+
+// commonEncryptLayer is a function to encrypt the plain layer using a new random
+// symmetric key and return the LayerBlockCipherHandler's JSON in string form for
+// later use during decryption
+func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) {
+	lbch, err := blockcipher.NewLayerBlockCipherHandler()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) {
+		lbco, err := bcFin()
+		if err != nil {
+			return blockcipher.LayerBlockCipherOptions{}, err
+		}
+		lbco.Private.Digest = d
+		return lbco, nil
+	}
+
+	return encLayerReader, newBcFin, err
+}
+
+// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer
+// by passing along the optsData
+func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) {
+	privOpts := blockcipher.PrivateLayerBlockCipherOptions{}
+	err := json.Unmarshal(privOptsData, &privOpts)
+	if err != nil {
+		return nil, "", errors.Wrapf(err, "could not JSON unmarshal privOptsData")
+	}
+
+	lbch, err := blockcipher.NewLayerBlockCipherHandler()
+	if err != nil {
+		return nil, "", err
+	}
+
+	pubOpts := blockcipher.PublicLayerBlockCipherOptions{}
+	if len(pubOptsData) > 0 {
+		err :=
json.Unmarshal(pubOptsData, &pubOpts) + if err != nil { + return nil, "", errors.Wrapf(err, "could not JSON unmarshal pubOptsData") + } + } + + opts := blockcipher.LayerBlockCipherOptions{ + Private: privOpts, + Public: pubOpts, + } + + plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts) + if err != nil { + return nil, "", err + } + + return plainLayerReader, opts.Private.Digest, nil +} + +// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace' +// and returns a map with those taken out +func FilterOutAnnotations(annotations map[string]string) map[string]string { + a := make(map[string]string) + if len(annotations) > 0 { + for k, v := range annotations { + if strings.HasPrefix(k, "org.opencontainers.image.enc.") { + continue + } + a[k] = v + } + } + return a +} diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod new file mode 100644 index 00000000000..8837d288ee3 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/go.mod @@ -0,0 +1,21 @@ +module github.com/containers/ocicrypt + +go 1.12 + +require ( + github.com/golang/protobuf v1.4.3 + github.com/google/go-cmp v0.5.2 // indirect + github.com/miekg/pkcs11 v1.1.1 + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/image-spec v1.0.2 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 + github.com/stretchr/testify v1.3.0 + go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 + golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 + google.golang.org/grpc v1.33.2 + gopkg.in/square/go-jose.v2 v2.5.1 + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum new file mode 100644 index 00000000000..a621a145c00 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/go.sum @@ -0,0 +1,117 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.mozilla.org/pkcs7 
v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go new file mode 100644 index 00000000000..b9d55539a11 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -0,0 +1,425 @@ +/* + Copyright The ocicrypt Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/term"
+)
+
+// GPGVersion enum representing the GPG client version to use.
+type GPGVersion int
+
+const (
+	// GPGv2 signifies gpgv2+
+	GPGv2 GPGVersion = iota
+	// GPGv1 signifies gpgv1+
+	GPGv1
+	// GPGVersionUndetermined signifies gpg client version undetermined
+	GPGVersionUndetermined
+)
+
+// GPGClient defines an interface for wrapping the gpg command line tools
+type GPGClient interface {
+	// ReadGPGPubRingFile gets the byte sequence of the gpg public keyring
+	ReadGPGPubRingFile() ([]byte, error)
+	// GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase
+	GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error)
+	// GetSecretKeyDetails gets the details of a secret key
+	GetSecretKeyDetails(keyid uint64) ([]byte, bool, error)
+	// GetKeyDetails gets the details of a public key
+	GetKeyDetails(keyid uint64) ([]byte, bool, error)
+	// ResolveRecipients resolves PGP key ids to user names
+	ResolveRecipients([]string) []string
+}
+
+// gpgClient contains generic gpg client information
+type gpgClient struct {
+	gpgHomeDir string
+}
+
+// gpgv2Client is a gpg2 client
+type gpgv2Client struct {
+	gpgClient
+}
+
+// gpgv1Client is a gpg client
+type gpgv1Client struct {
+	gpgClient
+}
+
+// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if it exists;
+// otherwise falls back to regular gpg.
+func GuessGPGVersion() GPGVersion { + if err := exec.Command("gpg2", "--version").Run(); err == nil { + return GPGv2 + } else if err := exec.Command("gpg", "--version").Run(); err == nil { + return GPGv1 + } else { + return GPGVersionUndetermined + } +} + +// NewGPGClient creates a new GPGClient object representing the given version +// and using the given home directory +func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) { + v := new(GPGVersion) + switch gpgVersion { + case "v1": + *v = GPGv1 + case "v2": + *v = GPGv2 + default: + v = nil + } + return newGPGClient(v, gpgHomeDir) +} + +func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) { + var gpgVersion GPGVersion + if version != nil { + gpgVersion = *version + } else { + gpgVersion = GuessGPGVersion() + } + + switch gpgVersion { + case GPGv1: + return &gpgv1Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGv2: + return &gpgv2Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGVersionUndetermined: + return nil, fmt.Errorf("unable to determine GPG version") + default: + return nil, fmt.Errorf("unhandled case: NewGPGClient") + } +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + + rfile, wfile, err := os.Pipe() + if err != nil { + return nil, errors.Wrapf(err, "could not create pipe") + } + defer func() { + rfile.Close() + wfile.Close() + }() + // fill pipe in background + go func(passphrase string) { + _, _ = wfile.Write([]byte(passphrase)) + wfile.Close() + }(passphrase) + + args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg2", args...) + cmd.ExtraFiles = []*os.File{rfile} + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg2", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg2", args...) + + keydata, err := runGPGGetOutput(cmd) + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg", args...) + + keydata, err := runGPGGetOutput(cmd) + + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists
+func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
+	return gc.getKeyDetails("-k", keyid)
+}
+
+// ResolveRecipients converts PGP keyids to email addresses, if possible
+func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string {
+	return resolveRecipients(gc, recipients)
+}
+
+// runGPGGetOutput runs the GPG command line and returns stdout as byte array
+// and any stderr in the error
+func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	stdoutstr, err2 := ioutil.ReadAll(stdout)
+	stderrstr, _ := ioutil.ReadAll(stderr)
+
+	if err := cmd.Wait(); err != nil {
+		return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
+	}
+
+	return stdoutstr, err2
+}
+
+// resolveRecipients walks the list of recipients and attempts to convert
+// all keyIds to email addresses; if something goes wrong during the
+// conversion of a recipient, the original string is returned for that
+// recipient
+func resolveRecipients(gc GPGClient, recipients []string) []string {
+	var result []string
+
+	for _, recipient := range recipients {
+		keyID, err := strconv.ParseUint(recipient, 0, 64)
+		if err != nil {
+			result = append(result, recipient)
+		} else {
+			details, found, _ := gc.GetKeyDetails(keyID)
+			if !found {
+				result = append(result, recipient)
+			} else {
+				email := extractEmailFromDetails(details)
+				if email == "" {
+					result = append(result, recipient)
+				} else {
+					result = append(result, email)
+				}
+			}
+		}
+	}
+	return result
+}
+
+var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
+
+func extractEmailFromDetails(details []byte) string {
+	loc := emailPattern.FindSubmatchIndex(details)
+	if len(loc) == 0 {
+		return ""
+	}
+	return string(emailPattern.Expand(nil, []byte("$email"), details, loc))
+}
+
+// uint64ToStringArray converts an array of uint64's to an array of strings
+// by applying a format string to each uint64
+func uint64ToStringArray(format string, in []uint64) []string {
+	var ret []string
+
+	for _, v := range in {
+		ret = append(ret, fmt.Sprintf(format, v))
+	}
+	return ret
+}
+
+// GPGGetPrivateKey walks the list of layerInfos and tries to decrypt the
+// wrapped symmetric keys. For this it determines whether a private key is
+// in the GPGVault or on this system and prompts for the passwords for those
+// that are available. If we do not find a private key on the system for
+// getting to the symmetric key of a layer then an error is generated.
+func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) { + // PrivateKeyData describes a private key + type PrivateKeyData struct { + KeyData []byte + KeyDataPassword []byte + } + var pkd PrivateKeyData + keyIDPasswordMap := make(map[uint64]PrivateKeyData) + + for _, desc := range descs { + for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) { + if scheme != "pgp" { + continue + } + keywrapper := GetKeyWrapper(scheme) + if keywrapper == nil { + return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme) + } + keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, nil, err + } + + found := false + for _, keyid := range keyIds { + // do we have this key? -- first check the vault + if gpgVault != nil { + _, keydata := gpgVault.GetGPGPrivateKey(keyid) + if len(keydata) > 0 { + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: nil, // password not supported in this case + } + keyIDPasswordMap[keyid] = pkd + found = true + break + } + } else if gpgClient != nil { + // check the local system's gpg installation + keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid) + // this may fail if the key is not here; we ignore the error + if !haveKey { + // key not on this system + continue + } + + _, found = keyIDPasswordMap[keyid] + if !found { + fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) + fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) + + password, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Printf("\n") + if err != nil { + return nil, nil, err + } + keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password)) + if err != nil { + return nil, nil, err + } + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: password, + } + keyIDPasswordMap[keyid] = pkd + found = true + } + break + } else { + return nil, nil, errors.New("no GPGVault or GPGClient passed") + } + } + if !found && len(b64pgpPackets) > 0 && mustFindKey { + ids := uint64ToStringArray("0x%x", keyIds) + + return nil, nil, errors.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) + } + } + } + + for _, pkd := range keyIDPasswordMap { + gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData) + gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword) + } + + return gpgPrivKeys, gpgPrivKeysPwds, nil +} diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go new file mode 100644 index 00000000000..dd9a10007c8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpgvault.go @@ -0,0 +1,100 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package ocicrypt
+
+import (
+	"bytes"
+	"io/ioutil"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/openpgp"
+	"golang.org/x/crypto/openpgp/packet"
+)
+
+// GPGVault defines an interface for wrapping multiple secret key rings
+type GPGVault interface {
+	// AddSecretKeyRingData adds a secret keyring via its raw byte array
+	AddSecretKeyRingData(gpgSecretKeyRingData []byte) error
+	// AddSecretKeyRingDataArray adds secret keyrings via their raw byte arrays
+	AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error
+	// AddSecretKeyRingFiles adds secret keyrings given their filenames
+	AddSecretKeyRingFiles(filenames []string) error
+	// GetGPGPrivateKey returns the OpenPGP keys matching a keyid along with
+	// the raw keyring data they were parsed from
+	GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte)
+}
+
+// gpgVault wraps an array of gpgSecretKeyRing
+type gpgVault struct {
+	entityLists []openpgp.EntityList
+	keyDataList [][]byte // the raw data originally passed in
+}
+
+// NewGPGVault creates an empty GPGVault
+func NewGPGVault() GPGVault {
+	return &gpgVault{}
+}
+
+// AddSecretKeyRingData adds a secret keyring to the gpgVault; the raw byte
+// array read from the file must be passed and will be parsed by this function
+func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error {
+	// read the private keys
+	r := bytes.NewReader(gpgSecretKeyRingData)
+	entityList, err := openpgp.ReadKeyRing(r)
+	if err != nil {
+		return errors.Wrapf(err, "could not read keyring")
+	}
+	g.entityLists = append(g.entityLists, entityList)
+	g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData)
+	return nil
+}
+
+// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte
+// arrays read from files must be passed
+func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error {
+	for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray {
+		if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddSecretKeyRingFiles adds the secret key rings given their filenames
+func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error {
+	for _, filename := range filenames {
+		gpgSecretKeyRingData, err := ioutil.ReadFile(filename)
+		if err != nil {
+			return err
+		}
+		err = g.AddSecretKeyRingData(gpgSecretKeyRingData)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetGPGPrivateKey returns the OpenPGP keys matching the given keyid and the
+// raw keyring data they came from
+func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) {
+	for i, el := range g.entityLists {
+		decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications)
+		if len(decKeys) > 0 {
+			return decKeys, g.keyDataList[i]
+		}
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go
new file mode 100644
index 00000000000..717e7f21871
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go
@@ -0,0 +1,380 @@
+package helpers
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/containers/ocicrypt"
+	encconfig "github.com/containers/ocicrypt/config"
+	"github.com/containers/ocicrypt/config/pkcs11config"
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	encutils "github.com/containers/ocicrypt/utils"
+
+	"github.com/pkg/errors"
+)
+
+// processRecipientKeys sorts the array of recipients by type. Recipients may be either
+// x509 certificates, public keys, or PGP public keys identified by email address or name
+func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) {
+	var (
+		gpgRecipients [][]byte
+		pubkeys       [][]byte
+		x509s         [][]byte
+		pkcs11Pubkeys [][]byte
+		pkcs11Yamls   [][]byte
+		keyProviders  [][]byte
+	)
+
+	for _, recipient := range recipients {
+
+		idx := strings.Index(recipient, ":")
+		if idx < 0 {
+			return nil, nil, nil, nil, nil, nil, errors.New("Invalid recipient format")
+		}
+
+		protocol := recipient[:idx]
+		value := recipient[idx+1:]
+
+		switch protocol {
+		case "pgp":
+			gpgRecipients = append(gpgRecipients, []byte(value))
+
+		case "jwe":
+			tmp, err := ioutil.ReadFile(value)
+			if err != nil {
+				return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file")
+			}
+			if !encutils.IsPublicKey(tmp) {
+				return nil, nil, nil, nil, nil, nil, errors.New("File provided is not a public key")
+			}
+			pubkeys = append(pubkeys, tmp)
+
+		case "pkcs7":
+			tmp, err := ioutil.ReadFile(value)
+			if err != nil {
+				return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file")
+			}
+			if !encutils.IsCertificate(tmp) {
+				return nil, nil, nil, nil, nil, nil, errors.New("File provided is not an x509 cert")
+			}
+			x509s = append(x509s, tmp)
+
+		case "pkcs11":
+			tmp, err := ioutil.ReadFile(value)
+			if err != nil {
+				return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file")
+			}
+			if encutils.IsPkcs11PublicKey(tmp) {
+				pkcs11Yamls = append(pkcs11Yamls, tmp)
+			} else if encutils.IsPublicKey(tmp) {
+				pkcs11Pubkeys = append(pkcs11Pubkeys, tmp)
+			} else {
+				return nil, nil, nil, nil, nil, nil, errors.New("Provided file is not a public key")
+			}
+
+		case "provider":
+			keyProviders = append(keyProviders, []byte(value))
+
+		default:
+			return nil, nil, nil, nil, nil, nil, errors.New("Provided protocol not recognized")
+		}
+	}
+	return gpgRecipients, pubkeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProviders, nil
+}
+
+// processx509Certs processes x509 certificate files
+func processx509Certs(keys []string) ([][]byte, error) {
+	var x509s [][]byte
+	for _, key := range keys {
+		fileName := strings.Split(key, ":")[0]
+		if _, err := os.Stat(fileName); os.IsNotExist(err) {
+			continue
+		}
+		tmp, err := ioutil.ReadFile(fileName)
+		if err != nil {
+			return nil, errors.Wrap(err, "Unable to read file")
+		}
+		if !encutils.IsCertificate(tmp) {
+			continue
+		}
+		x509s = append(x509s, tmp)
+
+	}
+	return x509s, nil
+}
+
+// processPwdString processes a password that may be in any of the following formats:
+// - file=<passwordfile>
+// - pass=<password>
+// - fd=<filedescriptor>
+// - <password>
+func processPwdString(pwdString string) ([]byte, error) {
+	if strings.HasPrefix(pwdString, "file=") {
+		return ioutil.ReadFile(pwdString[5:])
+	} else if strings.HasPrefix(pwdString, "pass=") {
+		return []byte(pwdString[5:]), nil
+	} else if strings.HasPrefix(pwdString, "fd=") {
+		fdStr := pwdString[3:]
+		fd, err := strconv.Atoi(fdStr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not parse file descriptor %s", fdStr)
+		}
+		f := os.NewFile(uintptr(fd), "pwdfile")
+		if f == nil {
+			return nil, fmt.Errorf("%s is not a valid file descriptor", fdStr)
+		}
+		defer f.Close()
+		pwd := make([]byte, 64)
+		n, err := f.Read(pwd)
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not read from file descriptor")
+		}
+		return pwd[:n], nil
+	}
+	return []byte(pwdString), nil
+}
+
+// processPrivateKeyFiles sorts the different types of private key files; private key files may either be
+// private keys or GPG private key ring files. The private key files may include the password for the
+// private key and take any of the following forms:
+// - <filename>
+// - <filename>:file=<passwordfile>
+// - <filename>:pass=<password>
+// - <filename>:fd=<filedescriptor>
+// - <filename>:<password>
+// - keyprovider:<...>
+func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) {
+	var (
+		gpgSecretKeyRingFiles [][]byte
+		gpgSecretKeyPasswords [][]byte
+		privkeys              [][]byte
+		privkeysPasswords     [][]byte
+		pkcs11Yamls           [][]byte
+		keyProviders          [][]byte
+		err                   error
+	)
+	// keys needed for decryption in case of adding a recipient
+	for _, keyfileAndPwd := range keyFilesAndPwds {
+		var password []byte
+
+		// treat "provider" protocol separately
+		if strings.HasPrefix(keyfileAndPwd, "provider:") {
+			keyProviders = append(keyProviders, []byte(keyfileAndPwd[len("provider:"):]))
+			continue
+		}
+		parts := strings.Split(keyfileAndPwd, ":")
+		if len(parts) == 2 {
+			password, err = processPwdString(parts[1])
+			if err != nil {
+				return nil, nil, nil, nil, nil, nil, err
+			}
+		}
+
+		keyfile := parts[0]
+		tmp, err := ioutil.ReadFile(keyfile)
+		if err != nil {
+			return nil, nil, nil, nil, nil, nil, err
+		}
+		isPrivKey, err := encutils.IsPrivateKey(tmp, password)
+		if encutils.IsPasswordError(err) {
+			return nil, nil, nil, nil, nil, nil, err
+		}
+
+		if encutils.IsPkcs11PrivateKey(tmp) {
+			pkcs11Yamls = append(pkcs11Yamls, tmp)
+		} else if isPrivKey {
+			privkeys = append(privkeys, tmp)
+			privkeysPasswords = append(privkeysPasswords, password)
+		} else if encutils.IsGPGPrivateKeyRing(tmp) {
+			gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp)
+			gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password)
+		} else {
+			// ignore if file is not recognized, so as not to error if additional
+			// metadata/cert files exist
+			continue
+		}
+	}
+	return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, pkcs11Yamls, keyProviders, nil
+}
+
+// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary
+// information to perform decryption from command line options.
+func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig.CryptoConfig, error) {
+	ccs := []encconfig.CryptoConfig{}
+
+	// x509 cert is needed for PKCS7 decryption
+	_, _, x509s, _, _, _, err := processRecipientKeys(decRecipients)
+	if err != nil {
+		return encconfig.CryptoConfig{}, err
+	}
+
+	// x509 certs can also be passed in via keys
+	x509FromKeys, err := processx509Certs(keys)
+	if err != nil {
+		return encconfig.CryptoConfig{}, err
+	}
+	x509s = append(x509s, x509FromKeys...)
+
+	gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, pkcs11Yamls, keyProviders, err := processPrivateKeyFiles(keys)
+	if err != nil {
+		return encconfig.CryptoConfig{}, err
+	}
+
+	if len(gpgSecretKeyRingFiles) > 0 {
+		gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords)
+		if err != nil {
+			return encconfig.CryptoConfig{}, err
+		}
+		ccs = append(ccs, gpgCc)
+	}
+
+	/* TODO: Add in GPG client query for secret keys in the future.
+ _, err = createGPGClient(context) + gpgInstalled := err == nil + if gpgInstalled { + if len(gpgSecretKeyRingFiles) == 0 && len(privKeys) == 0 && len(pkcs11Yamls) == 0 && len(keyProviders) == 0 && descs != nil { + // Get pgp private keys from keyring only if no private key was passed + gpgPrivKeys, gpgPrivKeyPasswords, err := getGPGPrivateKeys(context, gpgSecretKeyRingFiles, descs, true) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } else if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } + } + */ + + if len(x509s) > 0 { + x509sCc, err := encconfig.DecryptWithX509s(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, x509sCc) + } + if len(privKeys) > 0 { + privKeysCc, err := encconfig.DecryptWithPrivKeys(privKeys, privKeysPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, privKeysCc) + } + if len(pkcs11Yamls) > 0 { + p11conf, err := pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11PrivKeysCc, err := encconfig.DecryptWithPkcs11Yaml(p11conf, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, pkcs11PrivKeysCc) + } + if len(keyProviders) > 0 { + keyProviderCc, err := encconfig.DecryptWithKeyProvider(keyProviders) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, keyProviderCc) + } + return encconfig.CombineCryptoConfigs(ccs), nil +} + +// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys +func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) { + var decryptCc *encconfig.CryptoConfig + ccs := []encconfig.CryptoConfig{} + if len(keys) > 0 { + dcc, err := CreateDecryptCryptoConfig(keys, []string{}) + if err != nil { + return encconfig.CryptoConfig{}, err + } + decryptCc = &dcc + ccs = append(ccs, dcc) + } + + if len(recipients) > 0 { + gpgRecipients, pubKeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProvider, err := processRecipientKeys(recipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs := []encconfig.CryptoConfig{} + + // Create GPG client with guessed GPG version and default homedir + gpgClient, err := ocicrypt.NewGPGClient("", "") + gpgInstalled := err == nil + if len(gpgRecipients) > 0 && gpgInstalled { + gpgPubRingFile, err := gpgClient.ReadGPGPubRingFile() + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.EncryptWithGpg(gpgRecipients, gpgPubRingFile) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, gpgCc) + } + + // Create Encryption Crypto Config + if len(x509s) > 0 { + pkcs7Cc, err := encconfig.EncryptWithPkcs7(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs7Cc) + } + if len(pubKeys) > 0 { + jweCc, err := encconfig.EncryptWithJwe(pubKeys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, jweCc) + } + var p11conf *pkcs11.Pkcs11Config + if len(pkcs11Yamls) > 0 || len(pkcs11Pubkeys) > 0 { + p11conf, err = 
pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11Cc, err := encconfig.EncryptWithPkcs11(p11conf, pkcs11Pubkeys, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs11Cc) + } + + if len(keyProvider) > 0 { + keyProviderCc, err := encconfig.EncryptWithKeyProvider(keyProvider) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, keyProviderCc) + } + ecc := encconfig.CombineCryptoConfigs(encryptCcs) + if decryptCc != nil { + ecc.EncryptConfig.AttachDecryptConfig(decryptCc.DecryptConfig) + } + ccs = append(ccs, ecc) + } + + if len(ccs) > 0 { + return encconfig.CombineCryptoConfigs(ccs), nil + } else { + return encconfig.CryptoConfig{}, nil + } +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go new file mode 100644 index 00000000000..41d0f1b3adf --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -0,0 +1,136 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package jwe + +import ( + "crypto/ecdsa" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "github.com/pkg/errors" + jose "gopkg.in/square/go-jose.v2" +) + +type jweKeyWrapper struct { +} + +func (kw *jweKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.jwe" +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &jweKeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + var joseRecipients []jose.Recipient + + err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"]) + if err != nil { + return nil, err + } + // no recipients is not an error... 
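+	// returning nil, nil here simply means this key wrapper contributes no
+	// JWE annotation (e.g. only pgp/pkcs7 recipients were configured)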
+ if len(joseRecipients) == 0 { + return nil, nil + } + + encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil) + if err != nil { + return nil, errors.Wrapf(err, "jose.NewMultiEncrypter failed") + } + jwe, err := encrypter.Encrypt(optsData) + if err != nil { + return nil, errors.Wrapf(err, "JWE Encrypt failed") + } + return []byte(jwe.FullSerialize()), nil +} + +func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) { + jwe, err := jose.ParseEncrypted(string(jweString)) + if err != nil { + return nil, errors.New("jose.ParseEncrypted failed") + } + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for JWE decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("Private key password array length must be same as that of private keys") + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE") + if err != nil { + return nil, err + } + _, _, plain, err := jwe.DecryptMulti(key) + if err == nil { + return plain, nil + } + } + return nil, errors.New("JWE: No suitable private key found for decryption") +} + +func (kw *jweKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) { + return nil, nil +} + +func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) { + return []string{"[jwe]"}, nil +} + +func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error { + if len(pubKeys) == 0 { + return nil + } + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "JWE") + if err != nil { + return err + } + + alg := jose.RSA_OAEP + switch key.(type) { + case *ecdsa.PublicKey: + alg = jose.ECDH_ES_A256KW + } + + *joseRecipients = append(*joseRecipients, jose.Recipient{ + Algorithm: alg, + Key: key, + }) + } + return nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go new file mode 100644 index 00000000000..3b4c47ed4f4 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -0,0 +1,242 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package keyprovider + +import ( + "context" + "encoding/json" + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" +) + +type keyProviderKeyWrapper struct { + provider string + attrs keyproviderconfig.KeyProviderAttrs +} + +func (kw *keyProviderKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.provider." + kw.provider +} + +// NewKeyWrapper returns a new key wrapping interface using keyprovider +func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { + return &keyProviderKeyWrapper{provider: p, attrs: a} +} + +type KeyProviderKeyWrapProtocolOperation string + +var ( + OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" + OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" +) + +// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolInput struct { + // Operation is either "keywrap" or "keyunwrap" + Operation KeyProviderKeyWrapProtocolOperation `json:"op"` + // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap + KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` + // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap + KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` +} + +// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolOutput struct { + // KeyWrapResult encodes the results to key wrap if operation is to wrap + KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"` + // KeyUnwrapResult encodes the result to key unwrap if operation is to unwrap + KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"` +} + +type KeyWrapParams struct { + Ec *config.EncryptConfig `json:"ec"` + OptsData []byte `json:"optsdata"` +} + +type KeyUnwrapParams struct { + Dc *config.DecryptConfig `json:"dc"` + Annotation []byte `json:"annotation"` +} + +type KeyUnwrapResults struct { + OptsData []byte `json:"optsdata"` +} + +type KeyWrapResults struct { + Annotation []byte `json:"annotation"` +} + +var runner utils.CommandExecuter + +func init() { + runner = utils.Runner{} +} + +// WrapKeys calls appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyWrap, + KeyWrapParams: KeyWrapParams{ + Ec: ec, + OptsData: optsData, + }, + }) + + if err != nil { + return nil, err + } + + if _, ok := ec.Parameters[kw.provider]; ok { + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + return nil, errors.Wrap(err, "error while retrieving keyprovider protocol command output") + } + return protocolOuput.KeyWrapResults.Annotation, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) + if err != nil { + return nil, errors.Wrap(err, 
"error while retrieving keyprovider protocol grpc output") + } + + return protocolOuput.KeyWrapResults.Annotation, nil + } else { + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") + } + } + + return nil, nil +} + +// UnwrapKey calls appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData, +// which describe the symmetric key used for decrypting the layer +func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyUnwrap, + KeyUnwrapParams: KeyUnwrapParams{ + Dc: dc, + Annotation: jsonString, + }, + }) + if err != nil { + return nil, err + } + + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } else { + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") + } +} + +func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput + cc, err := grpc.Dial(connString, grpc.WithInsecure()) + if err != nil { + return nil, errors.Wrap(err, "error while dialing rpc server") + } + defer func() { + derr := cc.Close() + if derr != nil { + log.WithError(derr).Error("Error closing grpc socket") + } + }() + + client := keyproviderpb.NewKeyProviderServiceClient(cc) + req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ + KeyProviderKeyWrapProtocolInput: input, + } + + if operation == OpKeyWrap { + grpcOutput, err = client.WrapKey(context.Background(), req) + if err != nil { + return nil, errors.Wrap(err, "Error from grpc method") + } + } else if operation == OpKeyUnwrap { + grpcOutput, err = client.UnWrapKey(context.Background(), req) + if err != nil { + return nil, errors.Wrap(err, "Error from grpc method") + } + } else { + return nil, errors.New("Unsupported operation") + } + + respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, errors.Wrap(err, "Error while unmarshalling grpc method output") + } + + return &protocolOuput, nil +} + +func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + // Convert interface to command structure + respBytes, err := runner.Exec(command.Path, command.Args, input) + if err != nil { + return nil, err + } + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, errors.Wrap(err, "Error while unmarshalling binary executable command output") + } + return &protocolOuput, nil +} + +// Return false as it is not applicable to keyprovider protocol +func (kw 
*keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return false +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go new file mode 100644 index 00000000000..ed25e7dac3d --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go @@ -0,0 +1,48 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package keywrap + +import ( + "github.com/containers/ocicrypt/config" +) + +// KeyWrapper is the interface used for wrapping keys using +// a specific encryption technology (pgp, jwe) +type KeyWrapper interface { + WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) + UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error) + GetAnnotationID() string + + // NoPossibleKeys returns true if there is no possibility of performing + // decryption for parameters provided. + NoPossibleKeys(dcparameters map[string][][]byte) bool + + // GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation + // as in some key services, a private key may not be exportable (i.e. HSM) + // If not implemented, return nil + GetPrivateKeys(dcparameters map[string][][]byte) [][]byte + + // GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption + // schemes may not have a notion of key IDs + // If not implemented, return the nil slice + GetKeyIdsFromPacket(packet string) ([]uint64, error) + + // GetRecipients (optional) gets a list of recipients. It is optional due to the validity of + // recipients in a particular encryptiong scheme + // If not implemented, return the nil slice + GetRecipients(packet string) ([]string, error) +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go new file mode 100644 index 00000000000..275a3d8b993 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go @@ -0,0 +1,273 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pgp + +import ( + "bytes" + "crypto" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/mail" + "strconv" + "strings" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/pkg/errors" + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +type gpgKeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface for pgp +func NewKeyWrapper() keywrap.KeyWrapper { + return &gpgKeyWrapper{} +} + +var ( + // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption + GPGDefaultEncryptConfig = &packet.Config{ + Rand: rand.Reader, + DefaultHash: crypto.SHA256, + DefaultCipher: packet.CipherAES256, + CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression + RSABits: 2048, + } +) + +func (kw *gpgKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pgp" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + ciphertext := new(bytes.Buffer) + el, err := kw.createEntityList(ec) + if err != nil { + return nil, errors.Wrap(err, "unable to create entity list") + } + if len(el) == 0 { + // nothing to do -- not an error + return nil, nil + } + + plaintextWriter, err := openpgp.Encrypt(ciphertext, + el, /*EntityList*/ + nil, /* Sign*/ + nil, /* FileHint */ + GPGDefaultEncryptConfig) + if err != nil { + return nil, err + } + + if _, err = plaintextWriter.Write(optsData); err != nil { + return nil, err + } else if err = plaintextWriter.Close(); err != nil { + return nil, err + } + return ciphertext.Bytes(), err +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PGP payload. 
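+//
+// A sketch of the expected decrypt parameters (the key names match
+// getKeyParameters below; keyringData stands for a hypothetical exported
+// secret keyring):
+//
+//	dc := &config.DecryptConfig{Parameters: map[string][][]byte{
+//		"gpg-privatekeys":           {keyringData},
+//		"gpg-privatekeys-passwords": {[]byte("passphrase")},
+//	}}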
+func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) { + pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for idx, pgpPrivateKey := range pgpPrivateKeys { + r := bytes.NewBuffer(pgpPrivateKey) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, errors.Wrap(err, "unable to parse private keys") + } + + var prompt openpgp.PromptFunction + if len(pgpPrivateKeysPwd) > idx { + responded := false + prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) { + if responded { + return nil, fmt.Errorf("don't seem to have the right password") + } + responded = true + for _, key := range keys { + if key.PrivateKey != nil { + _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx]) + } + } + return pgpPrivateKeysPwd[idx], nil + } + } + + r = bytes.NewBuffer(pgpPacket) + md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig) + if err != nil { + continue + } + // we get the plain key options back + optsData, err := ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + continue + } + return optsData, nil + } + return nil, errors.New("PGP: No suitable key found to unwrap key") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded PGPPacket to uint64 keyIds +func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) { + + var keyids []uint64 + for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") { + pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket) + if err != nil { + return nil, errors.Wrapf(err, "could not decode base64 encoded PGP packet") + } + newids, err := kw.getKeyIDs(pgpPacket) + if err != nil { + return nil, err + } + keyids = append(keyids, newids...) 
+ } + return keyids, nil +} + +// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs +func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) { + var keyids []uint64 + + kbuf := bytes.NewBuffer(pgpPacket) + packets := packet.NewReader(kbuf) +ParsePackets: + for { + p, err := packets.Next() + if err == io.EOF { + break ParsePackets + } + if err != nil { + return []uint64{}, errors.Wrapf(err, "packets.Next() failed") + } + switch p := p.(type) { + case *packet.EncryptedKey: + keyids = append(keyids, p.KeyId) + case *packet.SymmetricallyEncrypted: + break ParsePackets + } + } + return keyids, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) { + keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, err + } + var array []string + for _, keyid := range keyIds { + array = append(array, "0x"+strconv.FormatUint(keyid, 16)) + } + return array, nil +} + +func (kw *gpgKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["gpg-privatekeys"] +} + +func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) { + + privKeys := kw.GetPrivateKeys(dcparameters) + if len(privKeys) == 0 { + return nil, nil, errors.New("GPG: Missing private key parameter") + } + + return privKeys, dcparameters["gpg-privatekeys-passwords"], nil +} + +// createEntityList creates the opengpg EntityList by reading the KeyRing +// first and then filtering out recipients' keys +func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) { + pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"] + if len(pgpPubringFile) == 0 { + return nil, nil + } + r := bytes.NewReader(pgpPubringFile[0]) + + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, err + } + + gpgRecipients := ec.Parameters["gpg-recipients"] + if len(gpgRecipients) == 0 { + return nil, nil + } + + rSet := make(map[string]int) + for _, r := range gpgRecipients { + rSet[string(r)] = 0 + } + + var filteredList openpgp.EntityList + for _, entity := range entityList { + for k := range entity.Identities { + addr, err := mail.ParseAddress(k) + if err != nil { + return nil, err + } + for _, r := range gpgRecipients { + recp := string(r) + if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 { + filteredList = append(filteredList, entity) + rSet[recp] = rSet[recp] + 1 + } + } + } + } + + // make sure we found keys for all the Recipients... + var buffer bytes.Buffer + notFound := false + buffer.WriteString("PGP: No key found for the following recipients: ") + + for k, v := range rSet { + if v == 0 { + if notFound { + buffer.WriteString(", ") + } + buffer.WriteString(k) + notFound = true + } + } + + if notFound { + return nil, errors.New(buffer.String()) + } + + return filteredList, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go new file mode 100644 index 00000000000..803b90865bc --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go @@ -0,0 +1,147 @@ +/* + Copyright The ocicrypt Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + + "github.com/pkg/errors" +) + +type pkcs11KeyWrapper struct { +} + +func (kw *pkcs11KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs11" +} + +// NewKeyWrapper returns a new key wrapping interface using pkcs11 +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs11KeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...)) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(pkcs11Recipients) == 0 { + return nil, nil + } + + jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData) + if err != nil { + return nil, errors.Wrapf(err, "PKCS11 EncryptMulitple failed") + } + return jsonString, nil +} + +func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for PKCS11 decryption") + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PrivKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey) + default: + continue + } + } + + plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString) + if err == nil { + return plaintext, nil + } + + return nil, errors.Wrapf(err, "PKCS11: No suitable private key found for decryption") +} + +func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["pkcs11-yamls"] +} + +func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) { + return []string{"[pkcs11]"}, nil +} + +func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) { + var pkcs11Keys []interface{} + + if len(pubKeys) == 0 { + return pkcs11Keys, nil + } + + p11conf, err := 
p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PubKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + } + pkcs11Keys = append(pkcs11Keys, key) + } + return pkcs11Keys, nil +} + +func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error){ + if _, ok := dcparameters["pkcs11-config"]; ok { + return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0]) + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go new file mode 100644 index 00000000000..1feae462bd7 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go @@ -0,0 +1,136 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs7 + +import ( + "crypto" + "crypto/x509" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "github.com/pkg/errors" + "go.mozilla.org/pkcs7" +) + +type pkcs7KeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs7KeyWrapper{} +} + +func (kw *pkcs7KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs7" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + x509Certs, err := collectX509s(ec.Parameters["x509s"]) + if err != nil { + return nil, err + } + // no recipients is not an error... 
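+	// returning nil, nil here simply means no PKCS7 annotation is produced
+	// when no x509 recipients were configured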
+ if len(x509Certs) == 0 { + return nil, nil + } + + pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM + return pkcs7.Encrypt(optsData, x509Certs) +} + +func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) { + if len(x509s) == 0 { + return nil, nil + } + var x509Certs []*x509.Certificate + for _, x509 := range x509s { + x509Cert, err := utils.ParseCertificate(x509, "PKCS7") + if err != nil { + return nil, err + } + x509Certs = append(x509Certs, x509Cert) + } + return x509Certs, nil +} + +func (kw *pkcs7KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PKCS7 payload. +func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) { + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("no private keys found for PKCS7 decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("private key password array length must be same as that of private keys") + } + + x509Certs, err := collectX509s(dc.Parameters["x509s"]) + if err != nil { + return nil, err + } + if len(x509Certs) == 0 { + return nil, errors.New("no x509 certificates found needed for PKCS7 decryption") + } + + p7, err := pkcs7.Parse(pkcs7Packet) + if err != nil { + return nil, errors.Wrapf(err, "could not parse PKCS7 packet") + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7") + if err != nil { + return nil, err + } + for _, x509Cert := range x509Certs { + optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key)) + if err != nil { + continue + } + return optsData, nil + } + } + return nil, errors.New("PKCS7: No suitable private key found for decryption") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded Packet to uint64 keyIds; +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) { + return nil, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) { + return []string{"[pkcs7]"}, nil +} diff --git a/vendor/github.com/containers/ocicrypt/reader.go b/vendor/github.com/containers/ocicrypt/reader.go new file mode 100644 index 00000000000..a93eec8e91c --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/reader.go @@ -0,0 +1,40 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+	"io"
+)
+
+type readerAtReader struct {
+	r   io.ReaderAt
+	off int64
+}
+
+// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader
+func ReaderFromReaderAt(r io.ReaderAt) io.Reader {
+	return &readerAtReader{
+		r:   r,
+		off: 0,
+	}
+}
+
+func (rar *readerAtReader) Read(p []byte) (n int, err error) {
+	n, err = rar.r.ReadAt(p, rar.off)
+	rar.off += int64(n)
+	return n, err
+}
diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go
new file mode 100644
index 00000000000..330069d491d
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/spec/spec.go
@@ -0,0 +1,12 @@
+package spec
+
+const (
+	// MediaTypeLayerEnc is the MIME type used for encrypted layers.
+	MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted"
+	// MediaTypeLayerGzipEnc is the MIME type used for encrypted compressed layers.
+	MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
+	// MediaTypeLayerNonDistributableEnc is the MIME type used for non-distributable encrypted layers.
+	MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
+	// MediaTypeLayerNonDistributableGzipEnc is the MIME type used for non-distributable encrypted compressed layers.
+	MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
+)
diff --git a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
new file mode 100644
index 00000000000..3b939bdea89
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
@@ -0,0 +1,109 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"io"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// DelayedReader wraps an io.Reader and allows a client to use the Reader
+// interface. The DelayedReader holds back some buffer to the client
+// so that it can report any error that occurred on the Reader it wraps
+// early to the client while it may still have held some data back.
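+//
+// A minimal usage sketch (hypothetical src and dst; the 1024-byte delay
+// buffer lets a late error from src be reported before all data has been
+// handed out):
+//
+//	dr := NewDelayedReader(src, 1024)
+//	n, err := io.Copy(dst, dr)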
+type DelayedReader struct { + reader io.Reader // Reader to Read() bytes from and delay them + err error // error that occurred on the reader + buffer []byte // delay buffer + bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller + bufoff int // offset in the delay buffer to give to Read() +} + +// NewDelayedReader wraps a io.Reader and allocates a delay buffer of bufsize bytes +func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader { + return &DelayedReader{ + reader: reader, + buffer: make([]byte, bufsize), + } +} + +// Read implements the io.Reader interface +func (dr *DelayedReader) Read(p []byte) (int, error) { + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + + // if we are completely drained, return io.EOF + if dr.err == io.EOF && dr.bufbytes == 0 { + return 0, io.EOF + } + + // only at the beginning we fill our delay buffer in an extra step + if dr.bufbytes < len(dr.buffer) && dr.err == nil { + dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + // dr.err != nil means we have EOF and can drain the delay buffer + // otherwise we need to still read from the reader + + var tmpbuf []byte + tmpbufbytes := 0 + if dr.err == nil { + tmpbuf = make([]byte, len(p)) + tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + + // copy out of the delay buffer into 'p' + tocopy1 := min(len(p), dr.bufbytes) + c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:]) + dr.bufoff += c1 + dr.bufbytes -= c1 + + c2 := 0 + // can p still hold more data? + if c1 < len(p) { + // copy out of the tmpbuf into 'p' + c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes]) + } + + // if tmpbuf holds data we need to hold onto, copy them + // into the delay buffer + if tmpbufbytes-c2 > 0 { + // left-shift the delay buffer and append the tmpbuf's remaining data + dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes] + dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...) + dr.bufoff = 0 + dr.bufbytes = len(dr.buffer) + } + + var err error + if dr.bufbytes == 0 { + err = io.EOF + } + return c1 + c2, err +} diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go new file mode 100644 index 00000000000..078c34799f8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go @@ -0,0 +1,56 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "bytes" + "io" + "os/exec" + "github.com/pkg/errors" +) + +// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns +// EOF if an EOF was encountered or any other error. 
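+//
+// For example (sketch), filling a fixed-size header buffer:
+//
+//	buf := make([]byte, 16)
+//	n, err := FillBuffer(r, buf) // err is io.EOF if the reader held fewer than 16 bytes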
+func FillBuffer(reader io.Reader, buffer []byte) (int, error) { + n, err := io.ReadFull(reader, buffer) + if err == io.ErrUnexpectedEOF { + return n, io.EOF + } + return n, err +} + +// first argument is the command, like cat or echo, +// the second is the list of args to pass to it +type CommandExecuter interface { + Exec(string, []string, []byte) ([]byte, error) +} + +type Runner struct{} + +// ExecuteCommand is used to execute a linux command line command and return the output of the command with an error if it exists. +func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) { + var out bytes.Buffer + stdInputBuffer := bytes.NewBuffer(input) + cmd := exec.Command(cmdName, args...) + cmd.Stdin = stdInputBuffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return nil, errors.Wrapf(err, "Error while running command: %s", cmdName) + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go new file mode 100644 index 00000000000..dc477d3cf8b --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go @@ -0,0 +1,243 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: keyprovider.proto + +package keyprovider + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KeyProviderKeyWrapProtocolInput struct { + KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } +func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{0} +} + +func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolInput + } + return nil +} + +type KeyProviderKeyWrapProtocolOutput struct { + KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } +func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{1} +} + +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolOutput + } + return nil +} + +func init() { + 
proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") + proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") +} + +func init() { + proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) +} + +var fileDescriptor_da74c8e785ad390c = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, + 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, + 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, + 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, + 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, + 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, + 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, + 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, + 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, + 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyProviderServiceClient is the client API for KeyProviderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyProviderServiceClient interface { + WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) +} + +type keyProviderServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { + return &keyProviderServiceClient{cc} +} + +func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyProviderServiceServer is the server API for KeyProviderService service. 
+type KeyProviderServiceServer interface { + WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) +} + +// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyProviderServiceServer struct { +} + +func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") +} +func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") +} + +func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { + s.RegisterService(&_KeyProviderService_serviceDesc, srv) +} + +func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).WrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/WrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "keyprovider.KeyProviderService", + HandlerType: (*KeyProviderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "WrapKey", + Handler: _KeyProviderService_WrapKey_Handler, + }, + { + MethodName: "UnWrapKey", + Handler: _KeyProviderService_UnWrapKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "keyprovider.proto", +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto new file mode 100644 index 00000000000..a71f0a59227 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package keyprovider; +option go_package = "keyprovider"; + +message keyProviderKeyWrapProtocolInput { + bytes KeyProviderKeyWrapProtocolInput = 1; +} + +message keyProviderKeyWrapProtocolOutput { + bytes KeyProviderKeyWrapProtocolOutput = 1; +} + +service KeyProviderService { + rpc 
WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {};
+    rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {};
+}
\ No newline at end of file
diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go
new file mode 100644
index 00000000000..38633b19b8b
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/testing.go
@@ -0,0 +1,166 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"math/big"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// CreateRSAKey creates an RSA key
+func CreateRSAKey(bits int) (*rsa.PrivateKey, error) {
+	key, err := rsa.GenerateKey(rand.Reader, bits)
+	if err != nil {
+		return nil, errors.Wrap(err, "rsa.GenerateKey failed")
+	}
+	return key, nil
+}
+
+// CreateRSATestKey creates an RSA key of the given size and returns
+// the public and private key in PEM or DER format
+func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) {
+	key, err := CreateRSAKey(bits)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "x509.MarshalPKIXPublicKey failed")
+	}
+	privData := x509.MarshalPKCS1PrivateKey(key)
+
+	// no more encoding needed for DER
+	if !pemencode {
+		return pubData, privData, nil
+	}
+
+	publicKey := pem.EncodeToMemory(&pem.Block{
+		Type:  "PUBLIC KEY",
+		Bytes: pubData,
+	})
+
+	var block *pem.Block
+
+	typ := "RSA PRIVATE KEY"
+	if len(password) > 0 {
+		block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed")
+		}
+	} else {
+		block = &pem.Block{
+			Type:  typ,
+			Bytes: privData,
+		}
+	}
+
+	privateKey := pem.EncodeToMemory(block)
+
+	return publicKey, privateKey, nil
+}
+
+// CreateECDSATestKey creates an elliptic curve key for the given curve and returns
+// the public and private key in DER format
+func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) {
+	key, err := ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "ecdsa.GenerateKey failed")
+	}
+
+	pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "x509.MarshalPKIXPublicKey failed")
+	}
+
+	privData, err := x509.MarshalECPrivateKey(key)
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "x509.MarshalECPrivateKey failed")
+	}
+
+	return pubData, privData, nil
+}
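As a quick orientation for these vendored test helpers, here is a minimal sketch (not part of the vendored code; the 2048-bit size and nil password are arbitrary illustrative choices) of generating a PEM-encoded RSA test key pair:

```go
package main

import (
	"fmt"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// pemencode=true requests PEM output; a nil password leaves the
	// private-key PEM block unencrypted.
	pubPEM, privPEM, err := utils.CreateRSATestKey(2048, nil, true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n%s\n", pubPEM, privPEM)
}
```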
+
+// CreateTestCA creates a root CA for testing
+func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) {
+	key, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "rsa.GenerateKey failed")
+	}
+
+	ca := &x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			CommonName: "test-ca",
+		},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().AddDate(1, 0, 0),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+	}
+	caCert, err := certifyKey(&key.PublicKey, ca, key, ca)
+
+	return key, caCert, err
+}
+
+// CertifyKey certifies a public key using the given CA's private key and certificate.
+// The certificate template for the public key is optional.
+func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+	pubKey, err := ParsePublicKey(pubbytes, "CertifyKey")
+	if err != nil {
+		return nil, err
+	}
+	return certifyKey(pubKey, template, caKey, caCert)
+}
+
+func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+	if template == nil {
+		template = &x509.Certificate{
+			SerialNumber: big.NewInt(1),
+			Subject: pkix.Name{
+				CommonName: "testkey",
+			},
+			NotBefore:             time.Now(),
+			NotAfter:              time.Now().Add(time.Hour),
+			IsCA:                  false,
+			KeyUsage:              x509.KeyUsageDigitalSignature,
+			BasicConstraintsValid: true,
+		}
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey)
+	if err != nil {
+		return nil, errors.Wrap(err, "x509.CreateCertificate failed")
+	}
+
+	cert, err := x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, errors.Wrap(err, "x509.ParseCertificate failed")
+	}
+
+	return cert, nil
+}
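A companion sketch (again illustrative, with arbitrary parameters) showing how CreateTestCA and CertifyKey combine: a throwaway root CA signs a certificate for a freshly generated DER-encoded public key, and a nil template falls back to the default "testkey" subject above:

```go
package main

import (
	"fmt"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Build a throwaway root CA for the test.
	caKey, caCert, err := utils.CreateTestCA()
	if err != nil {
		panic(err)
	}
	// Generate a DER-encoded (pemencode=false) RSA key pair to certify.
	pubDER, _, err := utils.CreateRSATestKey(2048, nil, false)
	if err != nil {
		panic(err)
	}
	// nil template: certifyKey fills in the default "testkey" subject.
	cert, err := utils.CertifyKey(pubDER, nil, caKey, caCert)
	if err != nil {
		panic(err)
	}
	fmt.Println(cert.Subject.CommonName) // "testkey"
}
```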
diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go
new file mode 100644
index 00000000000..07fe6d36708
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -0,0 +1,250 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"strings"
+
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/openpgp"
+	json "gopkg.in/square/go-jose.v2"
+)
+
+// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
+func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
+	jwk := json.JSONWebKey{}
+	err := jwk.UnmarshalJSON(privKey)
+	if err != nil {
+		return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
+	}
+	if jwk.IsPublic() {
+		return nil, fmt.Errorf("%s: JWK is not a private key", prefix)
+	}
+	return &jwk, nil
+}
+
+// parseJWKPublicKey parses the input byte array as a JWK
+func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
+	jwk := json.JSONWebKey{}
+	err := jwk.UnmarshalJSON(privKey)
+	if err != nil {
+		return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
+	}
+	if !jwk.IsPublic() {
+		return nil, fmt.Errorf("%s: JWK is not a public key", prefix)
+	}
+	return &jwk, nil
+}
+
+// parsePkcs11PrivateKeyYaml parses the input byte array as a pkcs11 key file in yaml format
+func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// parsePkcs11PublicKeyYaml parses the input byte array as a pkcs11 key file in yaml format
+func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// IsPasswordError checks whether an error is related to a missing or wrong
+// password
+func IsPasswordError(err error) bool {
+	if err == nil {
+		return false
+	}
+	msg := strings.ToLower(err.Error())
+
+	return strings.Contains(msg, "password") &&
+		(strings.Contains(msg, "missing") || strings.Contains(msg, "wrong"))
+}
+
+// ParsePrivateKey tries to parse a private key first in DER and then in PEM
+// format, falling back to JWK and pkcs11 YAML formats, and returns an error
+// if all of the parsers failed
+func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) {
+	key, err := x509.ParsePKCS8PrivateKey(privKey)
+	if err != nil {
+		key, err = x509.ParsePKCS1PrivateKey(privKey)
+		if err != nil {
+			key, err = x509.ParseECPrivateKey(privKey)
+		}
+	}
+	if err != nil {
+		block, _ := pem.Decode(privKey)
+		if block != nil {
+			var der []byte
+			if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+				if privKeyPassword == nil {
+					return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix)
+				}
+				der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+				if err != nil {
+					return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix)
+				}
+			} else {
+				der = block.Bytes
+			}
+
+			key, err = x509.ParsePKCS8PrivateKey(der)
+			if err != nil {
+				key, err = x509.ParsePKCS1PrivateKey(der)
+				if err != nil {
+					return nil, errors.Wrapf(err, "%s: Could not parse private key", prefix)
+				}
+			}
+		} else {
+			key, err = parseJWKPrivateKey(privKey, prefix)
+			if err != nil {
+				key, err = parsePkcs11PrivateKeyYaml(privKey, prefix)
+			}
+		}
+	}
+	return key, err
+}
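Because ParsePrivateKey silently walks the DER, PEM, JWK, and pkcs11-YAML parsers in turn, callers can stay format-agnostic. A minimal sketch (illustrative only; the key.pem path is hypothetical and the nil password assumes an unencrypted key):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// The same call accepts DER, PEM (optionally encrypted), JWK,
	// or pkcs11 YAML input; the "example" prefix tags error messages.
	data, err := os.ReadFile("key.pem") // hypothetical path
	if err != nil {
		panic(err)
	}
	key, err := utils.ParsePrivateKey(data, nil, "example")
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed a %T\n", key)
}
```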
+
+// IsPrivateKey returns true if the given byte array represents a private key.
+// It returns an error if, for example, the password is wrong.
+func IsPrivateKey(data []byte, password []byte) (bool, error) {
+	_, err := ParsePrivateKey(data, password, "")
+	return err == nil, err
+}
+
+// IsPkcs11PrivateKey returns true if the given byte array represents a pkcs11 private key
+func IsPkcs11PrivateKey(data []byte) bool {
+	return pkcs11.IsPkcs11PrivateKey(data)
+}
+
+// ParsePublicKey tries to parse a public key first in DER and then in PEM
+// format, falling back to JWK and pkcs11 YAML formats, and returns an error
+// if all of the parsers failed
+func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
+	key, err := x509.ParsePKIXPublicKey(pubKey)
+	if err != nil {
+		block, _ := pem.Decode(pubKey)
+		if block != nil {
+			key, err = x509.ParsePKIXPublicKey(block.Bytes)
+			if err != nil {
+				return nil, errors.Wrapf(err, "%s: Could not parse public key", prefix)
+			}
+		} else {
+			key, err = parseJWKPublicKey(pubKey, prefix)
+			if err != nil {
+				key, err = parsePkcs11PublicKeyYaml(pubKey, prefix)
+			}
+		}
+	}
+	return key, err
+}
+
+// IsPublicKey returns true if the given byte array represents a public key
+func IsPublicKey(data []byte) bool {
+	_, err := ParsePublicKey(data, "")
+	return err == nil
+}
+
+// IsPkcs11PublicKey returns true if the given byte array represents a pkcs11 public key
+func IsPkcs11PublicKey(data []byte) bool {
+	return pkcs11.IsPkcs11PublicKey(data)
+}
+
+// ParseCertificate tries to parse an x509 certificate first in DER and then
+// in PEM format, returning an error if both fail
+func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) {
+	x509Cert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		block, _ := pem.Decode(certBytes)
+		if block == nil {
+			return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix)
+		}
+		x509Cert, err = x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, errors.Wrapf(err, "%s: Could not parse x509 certificate", prefix)
+		}
+	}
+	return x509Cert, err
+}
+
+// IsCertificate returns true if the given byte array represents an x.509 certificate
+func IsCertificate(data []byte) bool {
+	_, err := ParseCertificate(data, "")
+	return err == nil
+}
+
+// IsGPGPrivateKeyRing returns true if the given byte array represents a GPG private key ring file
+func IsGPGPrivateKeyRing(data []byte) bool {
+	r := bytes.NewBuffer(data)
+	_, err := openpgp.ReadKeyRing(r)
+	return err == nil
+}
+
+// SortDecryptionKeys parses a list of comma separated base64 entries and sorts the data into
+// a map. 
Each entry in the list may be either a GPG private key ring, private key, or x.509 +// certificate +func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) { + dcparameters := make(map[string][][]byte) + + for _, b64Item := range strings.Split(b64ItemList, ",") { + var password []byte + b64Data := strings.Split(b64Item, ":") + keyData, err := base64.StdEncoding.DecodeString(b64Data[0]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key") + } + if len(b64Data) == 2 { + password, err = base64.StdEncoding.DecodeString(b64Data[1]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key password") + } + } + var key string + isPrivKey, err := IsPrivateKey(keyData, password) + if IsPasswordError(err) { + return nil, err + } + if isPrivKey { + key = "privkeys" + if _, ok := dcparameters["privkeys-passwords"]; !ok { + dcparameters["privkeys-passwords"] = [][]byte{password} + } else { + dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password) + } + } else if IsCertificate(keyData) { + key = "x509s" + } else if IsGPGPrivateKeyRing(keyData) { + key = "gpg-privatekeys" + } + if key != "" { + values := dcparameters[key] + if values == nil { + dcparameters[key] = [][]byte{keyData} + } else { + dcparameters[key] = append(dcparameters[key], keyData) + } + } else { + return nil, errors.New("Unknown decryption key type") + } + } + + return dcparameters, nil +} diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml new file mode 100644 index 00000000000..726acc3aef3 --- /dev/null +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -0,0 +1,177 @@ +--- + +# Main collection of env. vars to set for all tasks and scripts. 
+env:
+    ####
+    #### Global variables used for all tasks
+    ####
+    # Overrides default location (/tmp/cirrus) for repo clone
+    CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/storage"
+    # Shell used to execute all script commands
+    CIRRUS_SHELL: "/bin/bash"
+    # Automation script path relative to $CIRRUS_WORKING_DIR
+    SCRIPT_BASE: "./contrib/cirrus"
+    # No need to go crazy, but grab enough to cover most PRs
+    CIRRUS_CLONE_DEPTH: 50
+
+    ####
+    #### Cache-image names to test with (double-quotes around names are critical)
+    ####
+    FEDORA_NAME: "fedora-35"
+    PRIOR_FEDORA_NAME: "fedora-34"
+    UBUNTU_NAME: "ubuntu-2104"
+
+    # GCE project where images live
+    IMAGE_PROJECT: "libpod-218412"
+    # VM Image built in containers/automation_images
+    _BUILT_IMAGE_SUFFIX: "c6431352024203264"
+    FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
+    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
+    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"
+
+    ####
+    #### Command variables to help avoid duplication
+    ####
+    # Command to prefix every output line with a timestamp
+    # (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
+    _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
+    _DFCMD: 'df -lhTx tmpfs'
+    _RAUDITCMD: 'cat /var/log/audit/audit.log'
+    _UAUDITCMD: 'cat /var/log/kern.log'
+    _JOURNALCMD: 'journalctl -b'
+
+gcp_credentials: ENCRYPTED[c87717f04fb15499d19a3b3fa0ad2cdedecc047e82967785d101e9bc418e93219f755e662feac8390088a2df1a4d8464]
+
+# Default timeout for each task
+timeout_in: 120m
+
+# Default VM to use unless set or modified by task
+gce_instance:
+    image_project: "${IMAGE_PROJECT}"
+    zone: "us-central1-b"  # Required by Cirrus for the time being
+    cpu: 2
+    memory: "4Gb"
+    disk: 200
+    image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+
+
+fedora_testing_task: &fedora_testing
+    alias: fedora_testing
+    name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
+    depends_on:
+        - lint
+
+    gce_instance:  # Only need to specify differences from defaults (above)
+        image_name: "${VM_IMAGE}"
+
+    env:
+        OS_NAME: "${FEDORA_NAME}"
+        VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}"
+
+    # Not all $TEST_DRIVER combinations are valid for all $VM_IMAGE types.
+    matrix: &test_matrix
+        - env:
+            TEST_DRIVER: "vfs"
+        - env:
+            TEST_DRIVER: "overlay"
+        - env:
+            TEST_DRIVER: "fuse-overlay"
+        - env:
+            TEST_DRIVER: "fuse-overlay-whiteout"
+
+    # Separate scripts for separate outputs, makes debugging easier.
+ setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}' + + always: + df_script: '${_DFCMD} || true' + rh_audit_log_script: '${_RAUDITCMD} || true' + ubuntu_audit_log_script: '${_UAUDITCMD} || true' + journal_log_script: '${_JOURNALCMD} || true' + + +prior_fedora_testing_task: + <<: *fedora_testing + alias: prior_fedora_testing + name: *std_test_name + env: + OS_NAME: "${PRIOR_FEDORA_NAME}" + VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + + +ubuntu_testing_task: &ubuntu_testing + <<: *fedora_testing + alias: ubuntu_testing + name: *std_test_name + env: + OS_NAME: "${UBUNTU_NAME}" + VM_IMAGE: "${UBUNTU_CACHE_IMAGE_NAME}" + matrix: + - env: + TEST_DRIVER: "vfs" + - env: + TEST_DRIVER: "overlay" + + +lint_task: + env: + CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" + container: + image: golang:1.15 + modules_cache: + fingerprint_script: cat go.sum + folder: $GOPATH/pkg/mod + build_script: | + echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list + apt-get update + apt-get install -y libbtrfs-dev libdevmapper-dev + test_script: make lint + + +# Update metadata on VM images referenced by this repository state +meta_task: + + container: + image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}" + cpu: 1 + memory: 1 + + env: + # Space-separated list of images used by this repository state + IMGNAMES: |- + ${FEDORA_CACHE_IMAGE_NAME} + ${PRIOR_FEDORA_CACHE_IMAGE_NAME} + ${UBUNTU_CACHE_IMAGE_NAME} + BUILDID: "${CIRRUS_BUILD_ID}" + REPOREF: "${CIRRUS_CHANGE_IN_REPO}" + GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826] + GCPNAME: ENCRYPTED[91cf7aa421858b26b67835978d224b4a5c46afcf52a0f1ec1b69a99b248715dc8e92a1b56fde18e092acf256fa80ae9c] + GCPPROJECT: ENCRYPTED[79b0f7eb5958e25bc7095d5d368fa8d94447a43ffacb9c693de438186e2f767b7efe9563d6954297ae4730220e10aa9c] + CIRRUS_CLONE_DEPTH: 1 # source not used + + script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}' + + +vendor_task: + container: + image: golang:1.15 + modules_cache: + fingerprint_script: cat go.sum + folder: $GOPATH/pkg/mod + build_script: make vendor + test_script: hack/tree_status.sh + + +# Represent overall pass/fail status from required dependent tasks +success_task: + depends_on: + - lint + - fedora_testing + - prior_fedora_testing + - ubuntu_testing + - meta + - vendor + container: + image: golang:1.15 + clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed + script: /bin/true diff --git a/vendor/github.com/containers/storage/.dockerignore b/vendor/github.com/containers/storage/.dockerignore new file mode 100644 index 00000000000..9bd2c021930 --- /dev/null +++ b/vendor/github.com/containers/storage/.dockerignore @@ -0,0 +1,3 @@ +bundles +.gopath +vendor/pkg diff --git a/vendor/github.com/containers/storage/.gitignore b/vendor/github.com/containers/storage/.gitignore new file mode 100644 index 00000000000..99b40fbde50 --- /dev/null +++ b/vendor/github.com/containers/storage/.gitignore @@ -0,0 +1,32 @@ +# containers/storage project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.1 +*.5 +*.exe +*~ +*.orig +*.test +.*.swp +.DS_Store +.idea* +# a .bashrc may be added to customize the build environment +.bashrc +.gopath/ 
+docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +tests/tools/build +vendor/pkg/ +.vagrant +/containers-storage +/containers-storage.* diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml new file mode 100644 index 00000000000..cd4638a39c2 --- /dev/null +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -0,0 +1,37 @@ +--- +run: + concurrency: 6 + deadline: 5m +linters: + enable-all: true + disable: + - dogsled + - dupl + - errcheck + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godox + - gomnd + - gosec + - gosimple + - govet + - ineffassign + - lll + - maligned + - misspell + - nakedret + - prealloc + - scopelint + - staticcheck + - structcheck + - stylecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + - wsl diff --git a/vendor/github.com/containers/storage/.mailmap b/vendor/github.com/containers/storage/.mailmap new file mode 100644 index 00000000000..0527b6d84d6 --- /dev/null +++ b/vendor/github.com/containers/storage/.mailmap @@ -0,0 +1,254 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. 
Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS new file mode 100644 index 00000000000..129dd39692a --- /dev/null +++ b/vendor/github.com/containers/storage/AUTHORS @@ -0,0 +1,1523 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Miller +Adam Singer +Aditi Rajagopal +Aditya +Adria Casas +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Allen Madsen +Allen Sun +almoehi +Alvin Richards +amangoel +Amen Belayneh +Amit Bakshi +Amit Krishnan +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Carl Henrik Lunde +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Gageot +David Gebler +David Lawrence +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Xia +David Young +Davide Ceretti +Dawn Chen +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dima Stopel +Dimitri John Ledkov +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitry Demeshchuk +Dmitry Gusev +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +Francesc Campoy +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. 
Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jordan Williams +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Hawn +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keli Hu +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickael Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Clark +Kevin J. Lynagh +Kevin Menard +Kevin P. Kucharczyk +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Pelykh +Krasimir Georgiev +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt McCormick +Matt Moore +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mengdi Gao +Mert Yazıcıoğlu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav Trmač +mingqing +Mingzhen Feng +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Nelson Chen +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Wallis +Roberto G. 
Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +Shekhar Gulati +Sheng Yang +Shengbo Song +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +shuai-z +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tibor Vass +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +xamyzhao +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..f4f7df4b8cb --- /dev/null +++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The Containers Storage Project Community Code of Conduct + +The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/storage/CONTRIBUTING.md b/vendor/github.com/containers/storage/CONTRIBUTING.md new file mode 100644 index 00000000000..5364be769ed --- /dev/null +++ b/vendor/github.com/containers/storage/CONTRIBUTING.md @@ -0,0 +1,144 @@ +# Contributing to Containers/Storage + +We'd love to have you join the community! Below summarizes the processes +that we follow. + +## Topics + +* [Reporting Issues](#reporting-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Communications](#communications) + + +## Reporting Issues + +Before reporting an issue, check our backlog of +[open issues](https://github.com/containers/storage/issues) +to see if someone else has already reported it. If so, feel free to add +your scenario, or additional information, to the discussion. Or simply +"subscribe" to it to be notified when it is updated. 
+ +If you find a new issue with the project we'd love to hear about it! The most +important aspect of a bug report is that it includes enough information for +us to reproduce it. So, please include as much detail as possible and try +to remove the extra stuff that doesn't really relate to the issue itself. +The easier it is for us to reproduce it, the faster it'll be fixed! + +Please don't include any private/sensitive information in your issue! + +## Submitting Pull Requests + +No Pull Request (PR) is too small! Typos, additional comments in the code, +new testcases, bug fixes, new features, more documentation, ... it's all +welcome! + +While bug fixes can first be identified via an "issue", that is not required. +It's ok to just open up a PR with the fix, but make sure you include the same +information you would have included in an issue - like how to reproduce it. + +PRs for new features should include some background on what use cases the +new code is trying to address. When possible and when it makes sense, try to break-up +larger PRs into smaller ones - it's easier to review smaller +code changes. But only if those smaller ones make sense as stand-alone PRs. + +Regardless of the type of PR, all PRs should include: +* well documented code changes +* additional testcases. Ideally, they should fail w/o your code change applied +* documentation changes + +Squash your commits into logical pieces of work that might want to be reviewed +separate from the rest of the PRs. But, squashing down to just one commit is ok +too since in the end the entire PR will be reviewed anyway. When in doubt, +squash. + +PRs that fix issues should include a reference like `Closes #XXXX` in the +commit message so that github will automatically close the referenced issue +when the PR is merged. + + + +### Sign your PRs + +The sign-off is a line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +## Communications + +For general questions, or discussions, please use the +IRC group on `irc.freenode.net` called `container-projects` +that has been setup. + +For discussions around issues/bugs and features, you can use the github +[issues](https://github.com/containers/storage/issues) +and +[PRs](https://github.com/containers/storage/pulls) +tracking system. + + diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE new file mode 100644 index 00000000000..8f3fee627a4 --- /dev/null +++ b/vendor/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile new file mode 100644 index 00000000000..d7ca0c1c41f --- /dev/null +++ b/vendor/github.com/containers/storage/Makefile @@ -0,0 +1,125 @@ +export GO111MODULE=off +export GOPROXY=https://proxy.golang.org + +.PHONY: \ + all \ + binary \ + clean \ + cross \ + default \ + docs \ + gccgo \ + help \ + install.tools \ + local-binary \ + local-cross \ + local-gccgo \ + local-test-integration \ + local-test-unit \ + local-validate \ + lint \ + test \ + test-integration \ + test-unit \ + validate \ + vendor + +PACKAGE := github.com/containers/storage +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 +NATIVETAGS := +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) +GO ?= go +TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) + +# Go module support: set `-mod=vendor` to use the vendored sources +ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true) + GO:=GO111MODULE=on $(GO) + MOD_VENDOR=-mod=vendor +endif + +RUNINVM := vagrant/runinvm.sh + +default all: local-binary docs local-validate local-cross local-gccgo test-unit test-integration ## validate all checks, build and cross-build\nbinaries and docs, run tests in a VM + +clean: ## remove all built files + $(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5 + +sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go pkg/*/*.go pkg/*/*/*.go) +containers-storage: $(sources) ## build using gc on the host + $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage + +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + +binary local-binary: containers-storage + +local-gccgo: ## build using gccgo on the host + GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage + +local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ + os=`echo $${target} | cut -f1 -d/` ; \ + arch=`echo $${target} | cut -f2 -d/` ; \ + suffix=$${os}.$${arch} ; \ + echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ + env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ + done + +cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs + $(RUNINVM) make local-$@ + +docs: install.tools ## build the docs on the host + $(MAKE) -C docs docs + +gccgo: ## build using gccgo using VMs + $(RUNINVM) make local-$@ + +test: local-binary ## build the binaries and run the tests using VMs + $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration + +local-test-unit: local-binary 
## run the unit tests on the host (requires\nsuperuser privileges) + @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) + +test-unit: local-binary ## run the unit tests using VMs + $(RUNINVM) make local-$@ + +local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) + @cd tests; ./test_runner.bash + +test-integration: local-binary ## run the integration tests using VMs + $(RUNINVM) make local-$@ + +local-validate: ## validate DCO and gofmt on the host + @./hack/git-validation.sh + @./hack/gofmt.sh + +validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs + $(RUNINVM) make local-$@ + +install.tools: + make -C tests/tools + +$(FFJSON): + make -C tests/tools + +install.docs: docs + make -C docs install + +install: install.docs + +lint: install.tools + tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)" + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor + +vendor: + $(GO) mod tidy + $(GO) mod vendor + $(GO) mod verify diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE new file mode 100644 index 00000000000..8a37c1c7bc4 --- /dev/null +++ b/vendor/github.com/containers/storage/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md new file mode 100644 index 00000000000..fef46a68931 --- /dev/null +++ b/vendor/github.com/containers/storage/README.md @@ -0,0 +1,46 @@ +`storage` is a Go library which aims to provide methods for storing filesystem +layers, container images, and containers. A `containers-storage` CLI wrapper +is also included for manual and scripting use. + +To build the CLI wrapper, use 'make binary'. + +Operations which use VMs expect to launch them using 'vagrant', defaulting to +using its 'libvirt' provider. The boxes used are also available for the +'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to +'virtualbox' before kicking off the build. + +The library manages three types of items: layers, images, and containers. + +A *layer* is a copy-on-write filesystem which is notionally stored as a set of +changes relative to its *parent* layer, if it has one. A given layer can only +have one parent, but any layer can be the parent of multiple layers. Layers +which are parents of other layers should be treated as read-only. 
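+
+As a rough sketch of how a caller might drive this layer model through the
+library's Go API (assuming the store-level entry points `GetStore` and
+`CreateLayer`, the usual root-ful storage paths, and root privileges; exact
+signatures vary between releases, so treat this as illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/containers/storage"
+)
+
+func main() {
+	// Open (or create) a store; these are the common root-ful defaults.
+	store, err := storage.GetStore(storage.StoreOptions{
+		RunRoot:   "/run/containers/storage",
+		GraphRoot: "/var/lib/containers/storage",
+	})
+	if err != nil {
+		panic(err)
+	}
+	// A base layer with no parent (empty parent ID, writeable=false).
+	base, err := store.CreateLayer("", "", []string{"base"}, "", false, nil)
+	if err != nil {
+		panic(err)
+	}
+	// Two independent children of the same parent: layers form a tree, and
+	// a layer that is the parent of others is treated as read-only.
+	for _, name := range []string{"child-a", "child-b"} {
+		if _, err := store.CreateLayer("", base.ID, []string{name}, "", true, nil); err != nil {
+			panic(err)
+		}
+	}
+	fmt.Println("base layer:", base.ID)
+}
+```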
+ +An *image* is a reference to a particular layer (its _top_ layer), along with +other information which the library can manage for the convenience of its +caller. This information typically includes configuration templates for +running a binary contained within the image's layers, and may include +cryptographic signatures. Multiple images can reference the same layer, as the +differences between two images may not be in their layer contents. + +A *container* is a read-write layer which is a child of an image's top layer, +along with information which the library can manage for the convenience of its +caller. This information typically includes configuration information for +running the specific container. Multiple containers can be derived from a +single image. + +Layers, images, and containers are represented primarily by 32 character +hexadecimal IDs, but items of each kind can also have one or more arbitrary +names attached to them, which the library will automatically resolve to IDs +when they are passed in to API calls which expect IDs. + +The library can store what it calls *metadata* for each of these types of +items. This is expected to be a small piece of data, since it is cached in +memory and stored along with the library's own bookkeeping information. + +Additionally, the library can store one or more of what it calls *big data* for +images and containers. This is a named chunk of larger data, which is only in +memory when it is being read from or being written to its own disk file. + +**[Contributing](CONTRIBUTING.md)** +Information about contributing to this project. diff --git a/vendor/github.com/containers/storage/SECURITY.md b/vendor/github.com/containers/storage/SECURITY.md new file mode 100644 index 00000000000..ab2c14182fd --- /dev/null +++ b/vendor/github.com/containers/storage/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the Containers Storage Project + +The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION new file mode 100644 index 00000000000..f6aeca3b5c6 --- /dev/null +++ b/vendor/github.com/containers/storage/VERSION @@ -0,0 +1 @@ +1.38.5 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go new file mode 100644 index 00000000000..148f47b1722 --- /dev/null +++ b/vendor/github.com/containers/storage/containers.go @@ -0,0 +1,657 @@ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// A Container is a reference to a read-write layer with metadata. +type Container struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // container can be referred to by its ID or any of its names. Names + // are unique among containers. 
+ Names []string `json:"names,omitempty"` + + // ImageID is the ID of the image which was used to create the container. + ImageID string `json:"image"` + + // LayerID is the ID of the read-write layer for the container itself. + // It is assumed that the image's top layer is the parent of the container's + // read-write layer. + LayerID string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this container was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // UIDMap and GIDMap are used for setting up a container's root + // filesystem for use inside of a user namespace where UID mapping is + // being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ContainerStore provides bookkeeping for information about Containers. +type ContainerStore interface { + FileBasedStore + MetadataStore + ContainerBigDataStore + FlaggableStore + + // Create creates a container that has a specified ID (or generates a + // random one if an empty value is supplied) and optional names, + // based on the specified image, using the specified layer as its + // read-write layer. + // The maps in the container's options structure are recorded for the + // convenience of the caller, nothing more. + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + + // SetNames updates the list of names associated with the container + // with the specified ID. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the supplied values to the list of names associated with the container with + // the specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the container with + // the specified id. + RemoveNames(id string, names []string) error + + // Get retrieves information about a container given an ID or name. + Get(id string) (*Container, error) + + // Exists checks if there is a container with the given ID or name. + Exists(id string) bool + + // Delete removes the record of the container. + Delete(id string) error + + // Wipe removes records of all containers. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. 
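+	// Lookup returns ErrContainerUnknown when no matching container is found.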
+ Lookup(name string) (string, error) + + // Containers returns a slice enumerating the known containers. + Containers() ([]Container, error) +} + +type containerStore struct { + lockfile Locker + dir string + containers []*Container + idindex *truncindex.TruncIndex + byid map[string]*Container + bylayer map[string]*Container + byname map[string]*Container + loadMut sync.Mutex +} + +func copyContainer(c *Container) *Container { + return &Container{ + ID: c.ID, + Names: copyStringSlice(c.Names), + ImageID: c.ImageID, + LayerID: c.LayerID, + Metadata: c.Metadata, + BigDataNames: copyStringSlice(c.BigDataNames), + BigDataSizes: copyStringInt64Map(c.BigDataSizes), + BigDataDigests: copyStringDigestMap(c.BigDataDigests), + Created: c.Created, + UIDMap: copyIDMap(c.UIDMap), + GIDMap: copyIDMap(c.GIDMap), + Flags: copyStringInterfaceMap(c.Flags), + } +} + +func (c *Container) MountLabel() string { + if label, ok := c.Flags["MountLabel"].(string); ok { + return label + } + return "" +} + +func (c *Container) ProcessLabel() string { + if label, ok := c.Flags["ProcessLabel"].(string); ok { + return label + } + return "" +} + +func (c *Container) MountOpts() []string { + switch c.Flags["MountOpts"].(type) { + case []string: + return c.Flags["MountOpts"].([]string) + case []interface{}: + var mountOpts []string + for _, v := range c.Flags["MountOpts"].([]interface{}) { + if flag, ok := v.(string); ok { + mountOpts = append(mountOpts, flag) + } + } + return mountOpts + default: + return nil + } +} + +func (r *containerStore) Containers() ([]Container, error) { + containers := make([]Container, len(r.containers)) + for i := range r.containers { + containers[i] = *copyContainer(r.containers[i]) + } + return containers, nil +} + +func (r *containerStore) containerspath() string { + return filepath.Join(r.dir, "containers.json") +} + +func (r *containerStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *containerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *containerStore) Load() error { + needSave := false + rpath := r.containerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + containers := []*Container{} + layers := make(map[string]*Container) + idlist := []string{} + ids := make(map[string]*Container) + names := make(map[string]*Container) + if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(containers)) + for n, container := range containers { + idlist = append(idlist, container.ID) + ids[container.ID] = containers[n] + layers[container.LayerID] = containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = containers[n] + } + } + } + r.containers = containers + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.bylayer = layers + r.byname = names + if needSave { + return r.Save() + } + return nil +} + +func (r *containerStore) Save() error { + if !r.Locked() { + return errors.New("container store is not locked") + } + rpath := r.containerspath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jdata, err := json.Marshal(&r.containers) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newContainerStore(dir string) (ContainerStore, error) { + if err := os.MkdirAll(dir, 0700); 
err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + cstore := containerStore{ + lockfile: lockfile, + dir: dir, + containers: []*Container{}, + byid: make(map[string]*Container), + bylayer: make(map[string]*Container), + byname: make(map[string]*Container), + } + if err := cstore.Load(); err != nil { + return nil, err + } + return &cstore, nil +} + +func (r *containerStore) lookup(id string) (*Container, bool) { + if container, ok := r.byid[id]; ok { + return container, ok + } else if container, ok := r.byname[id]; ok { + return container, ok + } else if container, ok := r.bylayer[id]; ok { + return container, ok + } else if longid, err := r.idindex.Get(id); err == nil { + if container, ok := r.byid[longid]; ok { + return container, ok + } + } + return nil, false +} + +func (r *containerStore) ClearFlag(id string, flag string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + delete(container.Flags, flag) + return r.Save() +} + +func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if container.Flags == nil { + container.Flags = make(map[string]interface{}) + } + container.Flags[flag] = value + return r.Save() +} + +func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + if options.MountOpts != nil { + options.Flags["MountOpts"] = append([]string{}, options.MountOpts...) + } + if options.Volatile { + options.Flags["Volatile"] = true + } + names = dedupeNames(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, errors.Wrapf(ErrDuplicateName, + fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) + } + } + if err == nil { + container = &Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: copyStringInterfaceMap(options.Flags), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + r.containers = append(r.containers, container) + r.byid[id] = container + r.idindex.Add(id) + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + err = r.Save() + container = copyContainer(container) + } + return container, err +} + +func (r *containerStore) Metadata(id string) (string, error) { + if container, ok := r.lookup(id); ok { + return container.Metadata, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) SetMetadata(id, metadata string) error { + if container, ok := r.lookup(id); ok { + container.Metadata = metadata + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) removeName(container *Container, name string) { + container.Names = stringSliceWithoutValue(container.Names, name) +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. +func (r *containerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *containerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *containerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) + } + r.byname[name] = container + } + container.Names = names + return r.Save() +} + +func (r *containerStore) Delete(id string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + id = container.ID + toDeleteIndex := -1 + for i, candidate := range r.containers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + delete(r.byid, id) + r.idindex.Delete(id) + delete(r.bylayer, container.LayerID) + for _, name := range container.Names { + delete(r.byname, name) + } + if toDeleteIndex != -1 { + // delete the container at toDeleteIndex + if toDeleteIndex == len(r.containers)-1 { + r.containers = r.containers[:len(r.containers)-1] + } else { + r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) 
+ } + } + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +func (r *containerStore) Get(id string) (*Container, error) { + if container, ok := r.lookup(id); ok { + return copyContainer(container), nil + } + return nil, ErrContainerUnknown +} + +func (r *containerStore) Lookup(name string) (id string, err error) { + if container, ok := r.lookup(name); ok { + return container.ID, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *containerStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + } + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return ioutil.ReadFile(r.datapath(c.ID, key)) +} + +func (r *containerStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + } + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if err = r.SetBigData(id, key, data); err == nil { + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + } else { + return -1, err + } + } + return -1, ErrSizeUnknown +} + +func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + } + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if err = r.SetBigData(id, key, data); err == nil { + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + } else { + return "", err + } + } + return "", ErrDigestUnknown +} + +func (r *containerStore) BigDataNames(id string) ([]string, error) { + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return copyStringSlice(c.BigDataNames), nil +} + +func (r *containerStore) SetBigData(id, key string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + } + c, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) + if err == nil { + save := false + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := c.BigDataSizes[key] + c.BigDataSizes[key] = int64(len(data)) + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := c.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + c.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + save = true + } 
+ addName := true + for _, name := range c.BigDataNames { + if name == key { + addName = false + break + } + } + if addName { + c.BigDataNames = append(c.BigDataNames, key) + save = true + } + if save { + err = r.Save() + } + } + return err +} + +func (r *containerStore) Wipe() error { + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Lock() { + r.lockfile.Lock() +} + +func (r *containerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *containerStore) RLock() { + r.lockfile.RLock() +} + +func (r *containerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *containerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *containerStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *containerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *containerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *containerStore) Locked() bool { + return r.lockfile.Locked() +} + +func (r *containerStore) ReloadIfChanged() error { + r.loadMut.Lock() + defer r.loadMut.Unlock() + + modified, err := r.Modified() + if err == nil && modified { + return r.Load() + } + return err +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go new file mode 100644 index 00000000000..a566fbffa0f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -0,0 +1,766 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "time" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + mountpk "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/unix" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. 
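+// It reference-counts each mountpoint and caches layer mount paths so that
+// concurrent Get/Put calls share a single aufs mount.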
+type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver + locker *locker.Locker + mountOptions string +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") + + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) + } + + var mountOptions string + for _, option := range options.DriverOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "aufs.mountopt": + mountOptions = val + default: + return nil, fmt.Errorf("option %s not supported", option) + } + } + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + mountOptions: mountOptions, + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(home); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + }) + + for _, path := range []string{"mnt", "diff"} { + p := filepath.Join(home, path) + entries, err := ioutil.ReadDir(p) + if err != nil { + logger.WithError(err).WithField("dir", p).Error("error reading dir entries") + continue + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if strings.HasSuffix(entry.Name(), "-removing") { + logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") + if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { + logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") + } + } + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if userns.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + 
if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// Metadata not implemented +func (a *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (a *Driver) AdditionalImageStores() []string { + return nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if opts == nil { + opts = &graphdriver.CreateOpts{} + } + return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id, parent); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id, parent string) error { + paths := []string{ + "mnt", + "diff", + } + + // Directory permission is 0555. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair() + rootPerms := defaultPerms + if parent != "" { + st, err := system.Stat(path.Join(a.rootPath(), p, parent)) + if err != nil { + return err + } + rootPerms = os.FileMode(st.Mode()) + rootPair.UID = int(st.UID()) + rootPair.GID = int(st.GID()) + } + if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), rootPerms, rootPair); err != nil { + return err + } + } + return nil +} + +// Remove will unmount and remove the given id. 
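+// Unmount attempts are retried with a short sleep when the kernel reports
+// EBUSY, since an aufs mount can be transiently busy.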
+func (a *Driver) Remove(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + "layer": id, + }) + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + if os.IsNotExist(err) { + break + } + return err + } + if !mounted { + break + } + + err = a.unmount(mountpoint) + if err == nil { + break + } + + if err != unix.EBUSY { + return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) + } + if retries >= 5 { + return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing layers dir for %s", id) + } + + if err := atomicRemove(a.getDiffPath(id)); err != nil { + return errors.Wrapf(err, "could not remove diff path for id %s", id) + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that container runtime doesn't find it anymore) before doing removal of + // the whole tree. + if err := atomicRemove(mountpoint); err != nil { + if errors.Cause(err) == unix.EBUSY { + logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") + } + return errors.Wrapf(err, "could not remove mountpoint for id %s", id) + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +func atomicRemove(source string) error { + target := source + "-removing" + + err := os.Rename(source, target) + switch { + case err == nil, os.IsNotExist(err): + case os.IsExist(err): + // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove + if _, e := os.Stat(source); !os.IsNotExist(e) { + return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target) + } + default: + return errors.Wrapf(err, "error preparing atomic delete") + } + + return system.EnsureRemoveAll(target) +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, parents, options); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. 
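+// The mountpoint is reference-counted; the actual unmount happens only when
+// the count drops to zero.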
+func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For AUFS, it queries the mountpoint for this ID. +func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + return directory.Usage(m) +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. 
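+// If parent is not the direct parent of id, the request is delegated to the
+// naive diff driver.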
+func (a *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.ApplyDiff(id, parent, options)
+	}
+
+	// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
+	if err = a.applyDiff(id, options.Mappings, options.Diff); err != nil {
+		return
+	}
+
+	return directory.Size(path.Join(a.rootPath(), "diff", id))
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel)
+	}
+
+	// AUFS doesn't have snapshots, so we need to get changes from all parent
+	// layers.
+	layers, err := a.getParentLayerPaths(id)
+	if err != nil {
+		return nil, err
+	}
+	return archive.Changes(layers, path.Join(a.rootPath(), "diff", id))
+}
+
+func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
+	parentIds, err := getParentIDs(a.rootPath(), id)
+	if err != nil {
+		return nil, err
+	}
+	layers := make([]string, len(parentIds))
+
+	// Get the diff paths for all the parent ids
+	for i, p := range parentIds {
+		layers[i] = path.Join(a.rootPath(), "diff", p)
+	}
+	return layers, nil
+}
+
+func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
+	a.Lock()
+	defer a.Unlock()
+
+	// If the id is mounted or we get an error, return
+	if mounted, err := a.mounted(target); err != nil || mounted {
+		return err
+	}
+
+	rw := a.getDiffPath(id)
+
+	if err := a.aufsMount(layers, rw, target, options); err != nil {
+		return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
+	}
+	return nil
+}
+
+func (a *Driver) unmount(mountPath string) error {
+	a.Lock()
+	defer a.Unlock()
+
+	if mounted, err := a.mounted(mountPath); err != nil || !mounted {
+		return err
+	}
+	if err := Unmount(mountPath); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *Driver) mounted(mountpoint string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
+}
+
+// Cleanup aufs and unmount all mountpoints
+func (a *Driver) Cleanup() error {
+	var dirs []string
+	if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+		dirs = append(dirs, path)
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	for _, m := range dirs {
+		if err := a.unmount(m); err != nil {
+			logrus.Debugf("aufs error unmounting %s: %s", m, err)
+		}
+	}
+	return mountpk.Unmount(a.root)
+}
+
+func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
+	defer func() {
+		if err != nil {
+			Unmount(target)
+		}
+	}()
+
+	// Mount options are clipped to the page size (4096 bytes). If more layers
+	// are present than fit, the rest are remounted individually using append.
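+	// (The 54-byte offset below is inherited from the upstream AUFS driver;
+	// it presumably reserves room for the fixed option string and mount-label
+	// syntax appended after the branch list.)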
+
+	offset := 54
+	if useDirperm() {
+		offset += len(",dirperm1")
+	}
+	b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
+	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
+
+	index := 0
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		if bp+len(layer) > len(b) {
+			break
+		}
+		bp += copy(b[bp:], layer)
+	}
+
+	opts := "dio,xino=/dev/shm/aufs.xino"
+	mountOptions := a.mountOptions
+	if len(options.Options) > 0 {
+		mountOptions = strings.Join(options.Options, ",")
+	}
+	if mountOptions != "" {
+		opts += fmt.Sprintf(",%s", mountOptions)
+	}
+
+	if useDirperm() {
+		opts += ",dirperm1"
+	}
+	data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
+	if err = mount("none", target, "aufs", 0, data); err != nil {
+		return
+	}
+
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
+		if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+// useDirperm checks whether the dirperm1 mount option can be used with the
+// current version of aufs.
+func useDirperm() bool {
+	enableDirpermLock.Do(func() {
+		base, err := ioutil.TempDir("", "storage-aufs-base")
+		if err != nil {
+			logrus.Errorf("Checking dirperm1: %v", err)
+			return
+		}
+		defer os.RemoveAll(base)
+
+		union, err := ioutil.TempDir("", "storage-aufs-union")
+		if err != nil {
+			logrus.Errorf("Checking dirperm1: %v", err)
+			return
+		}
+		defer os.RemoveAll(union)
+
+		opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
+		if err := mount("none", union, "aufs", 0, opts); err != nil {
+			return
+		}
+		enableDirperm = true
+		if err := Unmount(union); err != nil {
+			logrus.Errorf("Checking dirperm1: failed to unmount %v", err)
+		}
+	})
+	return enableDirperm
+}
+
+// UpdateLayerIDMap updates ID mappings in a layer from matching the ones
+// specified by toContainer to those specified by toHost.
+func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	return fmt.Errorf("aufs doesn't support changing ID mappings")
+}
+
+// SupportsShifting tells whether the driver supports shifting of the
+// UIDs/GIDs in a userNS
+func (a *Driver) SupportsShifting() bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
new file mode 100644
index 00000000000..d2325fc46cd
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package aufs
+
+import (
+	"bufio"
+	"io/ioutil"
+	"os"
+	"path"
+)
+
+// Return the IDs of all layers found under root: every entry that is not a
+// directory.
+func loadIds(root string) ([]string, error) {
+	dirs, err := ioutil.ReadDir(root)
+	if err != nil {
+		return nil, err
+	}
+	out := []string{}
+	for _, d := range dirs {
+		if !d.IsDir() {
+			out = append(out, d.Name())
+		}
+	}
+	return out, nil
+}
+
+// Read the layers file for the current id and return all the
+// layers represented by new lines in the file
+//
+// If there are no lines in the file then the id has no parent
+// and an empty slice is returned.
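+//
+// The first line of the file names the layer's direct parent; isParent in
+// aufs.go relies on that ordering.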
+func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go new file mode 100644 index 00000000000..100e7537a9c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := unix.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go new file mode 100644 index 00000000000..937104ba3fd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "golang.org/x/sys/unix" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return unix.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go new file mode 100644 index 00000000000..d030b066378 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
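+// Its value is never used, because mount below always returns an error on
+// these platforms.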
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
new file mode 100644
index 00000000000..3903b1dddd9
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -0,0 +1,682 @@
+// +build linux,cgo
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+const defaultPerms = os.FileMode(0555)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, userDiskQuota, err := parseOptions(options.DriverOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: options.UIDMaps,
+		gidMaps: options.GIDMaps,
+		options: opt,
+	}
+
+	if userDiskQuota {
+		if err := driver.enableQuota(); err != nil {
+			return nil, err
+		}
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, graphdriver.NewNaiveLayerIDMapUpdater(driver)), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
+	var options btrfsOptions
+	userDiskQuota := false
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, userDiskQuota, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, userDiskQuota, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		case "btrfs.mountopt":
+			return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
+		default:
+			return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, userDiskQuota, nil
+}
+
+// Driver contains information about the filesystem mounted.
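+// The quotaEnabled flag is resolved lazily the first time quota state is
+// queried, guarded by the sync.Once field.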
+type Driver struct { + //root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions + quotaEnabled bool + once sync.Once +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// Metadata returns empty metadata for this driver. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string, quotaEnabled bool) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we 
want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := qgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + +func (d *Driver) enableQuota() error { + d.updateQuotaStatus() + + if d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = true + + return nil +} + +func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +// qgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of 
BTRFS_QGROUP_STATUS_KEY. +// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY. +// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func qgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. 
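+// If parent is empty a fresh subvolume is created; otherwise the parent
+// subvolume is snapshotted. A "size" storage option, when present, sets a
+// qgroup quota on the new subvolume.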
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + if err := os.Chmod(path.Join(subvolumes, id), defaultPerms); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.enableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + if d.quotaEnabled { + return err + } + // If quota is not enabled, fallback to rmdir syscall to delete subvolumes. 
+	// This would allow unprivileged users to delete their owned subvolumes
+	// in kernel >= 4.18 without the user_subvol_rm_allowed mount option.
+	}
+	if err := system.EnsureRemoveAll(dir); err != nil {
+		return err
+	}
+	if err := d.subvolRescanQuota(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Get the requested filesystem id.
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
+	dir := d.subvolumesDirID(id)
+	st, err := os.Stat(dir)
+	if err != nil {
+		return "", err
+	}
+	switch len(options.Options) {
+	case 0:
+	case 1:
+		if options.Options[0] == "ro" {
+			// ignore "ro" option
+			break
+		}
+		fallthrough
+	default:
+		return "", fmt.Errorf("btrfs driver does not support mount options")
+	}
+
+	if !st.IsDir() {
+		return "", fmt.Errorf("%s: not a directory", dir)
+	}
+
+	if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+		if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
+			if err := d.enableQuota(); err != nil {
+				return "", err
+			}
+			if err := subvolLimitQgroup(dir, size); err != nil {
+				return "", err
+			}
+		}
+	}
+
+	return dir, nil
+}
+
+// Put is not implemented for BTRFS as there is no cleanup required for the id.
+func (d *Driver) Put(id string) error {
+	// Get() creates no runtime resources (like e.g. mounts)
+	// so this doesn't need to do anything.
+	return nil
+}
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For BTRFS, it queries the subvolumes path for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.subvolumesDirID(id))
+}
+
+// Exists checks if the id exists in the filesystem.
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirID(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
new file mode 100644
index 00000000000..f07088887a1
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
new file mode 100644
index 00000000000..edd8bdab85e
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion,cgo
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func btrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+
+func btrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
new file mode 100644
index 00000000000..905e834e354
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build !linux btrfs_noversion !cgo
+
+package btrfs
+
+// TODO(vbatts)
remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go new file mode 100644 index 00000000000..63bfd2d136f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -0,0 +1,133 @@ +package graphdriver + +import ( + "bytes" + "fmt" + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/selinux/pkg/pwalk" +) + +const ( + chownByMapsCmd = "storage-chown-by-maps" +) + +func init() { + reexec.Register(chownByMapsCmd, chownByMapsMain) +} + +func chownByMapsMain() { + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "requires mapping configuration on stdin and directory path") + os.Exit(1) + } + // Read and decode our configuration. + discreteMaps := [4][]idtools.IDMap{} + config := bytes.Buffer{} + if _, err := config.ReadFrom(os.Stdin); err != nil { + fmt.Fprintf(os.Stderr, "error reading configuration: %v", err) + os.Exit(1) + } + if err := json.Unmarshal(config.Bytes(), &discreteMaps); err != nil { + fmt.Fprintf(os.Stderr, "error decoding configuration: %v", err) + os.Exit(1) + } + // Try to chroot. This may not be possible, and on some systems that + // means we just Chdir() to the directory, so from here on we should be + // using relative paths. + if err := chrootOrChdir(os.Args[1]); err != nil { + fmt.Fprintf(os.Stderr, "error chrooting to %q: %v", os.Args[1], err) + os.Exit(1) + } + // Build the mapping objects. + toContainer := idtools.NewIDMappingsFromMaps(discreteMaps[0], discreteMaps[1]) + if len(toContainer.UIDs()) == 0 && len(toContainer.GIDs()) == 0 { + toContainer = nil + } + toHost := idtools.NewIDMappingsFromMaps(discreteMaps[2], discreteMaps[3]) + if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { + toHost = nil + } + chown := func(path string, info os.FileInfo, _ error) error { + if path == "." { + return nil + } + return platformLChown(path, info, toHost, toContainer) + } + if err := pwalk.Walk(".", chown); err != nil { + fmt.Fprintf(os.Stderr, "error during chown: %v", err) + os.Exit(1) + } + os.Exit(0) +} + +// ChownPathByMaps walks the filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. 
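+// The work happens in a re-exec'd helper process (storage-chown-by-maps)
+// that chroots into path first (falling back to a plain chdir), so the walk
+// operates on paths relative to that root.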
+func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error { + if toContainer == nil { + toContainer = &idtools.IDMappings{} + } + if toHost == nil { + toHost = &idtools.IDMappings{} + } + + config, err := json.Marshal([4][]idtools.IDMap{toContainer.UIDs(), toContainer.GIDs(), toHost.UIDs(), toHost.GIDs()}) + if err != nil { + return err + } + cmd := reexec.Command(chownByMapsCmd, path) + cmd.Stdin = bytes.NewReader(config) + output, err := cmd.CombinedOutput() + if len(output) > 0 && err != nil { + return fmt.Errorf("%v: %s", err, string(output)) + } + if err != nil { + return err + } + if len(output) > 0 { + return fmt.Errorf("%s", string(output)) + } + + return nil +} + +type naiveLayerIDMapUpdater struct { + ProtoDriver +} + +// NewNaiveLayerIDMapUpdater wraps the ProtoDriver in a LayerIDMapUpdater that +// uses ChownPathByMaps to update the ownerships in a layer's filesystem tree. +func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater { + return &naiveLayerIDMapUpdater{ProtoDriver: driver} +} + +// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. +func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + driver := n.ProtoDriver + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return err + } + defer func() { + driver.Put(id) + }() + + return ChownPathByMaps(layerFs, toContainer, toHost) +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (n *naiveLayerIDMapUpdater) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go new file mode 100644 index 00000000000..70633ac8b46 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -0,0 +1,75 @@ +// +build !windows + +package graphdriver + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + st, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. 
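+	// A nil mapping on either side means no translation is configured in
+	// that direction; the IDs then pass through unchanged.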
+ uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUID, mappedGID, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + } + mappedUID, mappedGID = uid, gid + } + uid, gid = mappedUID, mappedGID + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHost(pair) + if err != nil { + return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + cap, err := system.Lgetxattr(path, "security.capability") + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + + // Make the change. + if err := system.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + // Restore the SUID and SGID bits if they were originally set. + if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := system.Chmod(path, info.Mode()); err != nil { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + } + if cap != nil { + if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + } + + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go new file mode 100644 index 00000000000..31bd5bb52dd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -0,0 +1,14 @@ +// +build windows + +package graphdriver + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + return &os.PathError{"lchown", path, syscall.EWINDOWS} +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go new file mode 100644 index 00000000000..c8c4905bfee --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go @@ -0,0 +1,21 @@ +// +build linux darwin freebsd solaris + +package graphdriver + +import ( + "fmt" + "os" + "syscall" +) + +// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the +// specified path followed by chdir() to the new root directory +func chrootOrChdir(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("error chrooting to %q: %v", path, err) + } + if err := syscall.Chdir(string(os.PathSeparator)); err != nil { + return fmt.Errorf("error changing to %q: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go new file mode 100644 index 00000000000..f4dc22a9615 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -0,0 +1,15 @@ +package graphdriver + +import ( + "fmt" + "syscall" +) + +// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the +// specified path followed by chdir() to the new root directory +func chrootOrChdir(path string) error { + if err := 
syscall.Chdir(path); err != nil {
+		return fmt.Errorf("error changing to %q: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
new file mode 100644
index 00000000000..7773844f916
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -0,0 +1,306 @@
+// +build cgo
+
+package copy
+
+/*
+#include <linux/fs.h>
+
+#ifndef FICLONE
+#define FICLONE _IOW(0x94, 9, int)
+#endif
+*/
+import "C"
+import (
+	"container/list"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"golang.org/x/sys/unix"
+)
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+	// Hardlink creates a new hardlink to the existing file
+	Hardlink
+)
+
+// CopyRegularToFile copies the content of a file to another
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	if *copyWithFileClone {
+		_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+		if err == nil {
+			return nil
+		}
+
+		*copyWithFileClone = false
+		if err == unix.EXDEV {
+			*copyWithFileRange = false
+		}
+	}
+	if *copyWithFileRange {
+		err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
+		// Trying the file_clone may not have caught the exdev case
+		// as the ioctl may not have been available (therefore EINVAL)
+		if err == unix.EXDEV || err == unix.ENOSYS {
+			*copyWithFileRange = false
+		} else {
+			return err
+		}
+	}
+	return legacyCopy(srcFile, dstFile)
+}
+
+// CopyRegular copies the content of a file to another
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+	// If the destination file already exists, we shouldn't blow it away
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	return CopyRegularToFile(srcPath, dstFile, fileinfo, copyWithFileRange, copyWithFileClone)
+}
+
+func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
+	amountLeftToCopy := fileinfo.Size()
+
+	for amountLeftToCopy > 0 {
+		n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
+		if err != nil {
+			return err
+		}
+
+		amountLeftToCopy = amountLeftToCopy - int64(n)
+	}
+
+	return nil
+}
+
+func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
+	_, err := pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type fileID struct {
+	dev uint64
+	ino uint64
+}
+
+type dirMtimeInfo struct {
+	dstPath *string
+	stat    *syscall.Stat_t
+}
+
+// DirCopy copies or
hardlinks the contents of one directory to another, +// properly handling xattrs, and soft links +// +// Copying xattrs can be opted out of by passing false for copyXattrs. +func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { + copyWithFileRange := true + copyWithFileClone := true + + // This is a map of source file inodes to dst file paths + copiedFiles := make(map[fileID]string) + + dirsToSetMtimes := list.New() + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch mode := f.Mode(); { + case mode.IsRegular(): + id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} + if copyMode == Hardlink { + isHardlink = true + if err2 := os.Link(srcPath, dstPath); err2 != nil { + return err2 + } + } else if hardLinkDstPath, ok := copiedFiles[id]; ok { + if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { + return err2 + } + } else { + if err2 := CopyRegular(srcPath, dstPath, f, ©WithFileRange, ©WithFileClone); err2 != nil { + return err2 + } + copiedFiles[id] = dstPath + } + + case mode.IsDir(): + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case mode&os.ModeSymlink != 0: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case mode&os.ModeNamedPipe != 0: + if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case mode&os.ModeSocket != 0: + s, err := net.Listen("unix", dstPath) + if err != nil { + return err + } + s.Close() + + case mode&os.ModeDevice != 0: + if userns.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if copyXattrs { + if err := doCopyXattrs(srcPath, dstPath); err != nil { + return err + } + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert + if f.IsDir() { + dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) + } else if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { + mtimeInfo := e.Value.(*dirMtimeInfo) + ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} + if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { + return err + } + } + + return nil +} + +func doCopyXattrs(srcPath, dstPath string) error { + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + xattrs, err := system.Llistxattr(srcPath) + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + return err + } + + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + if err := copyXattr(srcPath, dstPath, key); err != nil { + return err + } + } + } + + if unshare.IsRootless() { + return nil + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. 
+ return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") +} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go new file mode 100644 index 00000000000..e97523c3578 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go @@ -0,0 +1,40 @@ +// +build !linux !cgo + +package copy + +import ( + "io" + "os" + + "github.com/containers/storage/pkg/chrootarchive" +) + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and copies the content of the file + Content Mode = iota +) + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling soft links +func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} + +// CopyRegularToFile copies the content of a file to another +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { + f, err := os.Open(srcPath) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(dstFile, f) + return err +} + +// CopyRegular copies the content of a file to another +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath) +} diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go new file mode 100644 index 00000000000..3fc45495b2e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -0,0 +1,63 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } else if !c.checker.IsMounted(path) { + // if the unmount was performed outside of this process (e.g. conmon cleanup) + //the ref counter would lose track of it. Check if it is still mounted. 
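+		// Resetting the count to zero here lets infoOp below apply the
+		// caller's increment or decrement against a clean state.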
+ m.count = 0 + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go new file mode 100644 index 00000000000..c23097b7635 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -0,0 +1,248 @@ +// +build linux,cgo + +package devmapper + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 + MetaDataSize string +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + absPath, err := filepath.Abs(dev) + if err != nil { + return 
errors.Errorf("unable to get absolute path for %s: %s", dev, err) + } + realPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + return errors.Errorf("failed to canonicalise path for %s: %s", dev, err) + } + if err := checkDevAvailable(absPath); err != nil { + logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) + if err := checkDevAvailable(realPath); err != nil { + return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath) + } + } + if err := checkDevInVG(realPath); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(realPath); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + lvmProfileDir := "/etc/lvm/profile" + binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} + + for _, bin := range binaries { + if _, err := exec.LookPath(bin); err != nil { + return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + } + } + + err := os.MkdirAll(lvmProfileDir, 0755) + if err != nil { + return errors.Wrap(err, "error creating lvm profile directory") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + if cfg.MetaDataSize == "" { + cfg.MetaDataSize = "128k" + } + + out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := 
fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing storage thinp autoextend profile") + } + + out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go new file mode 100644 index 00000000000..c5168bfdd25 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -0,0 +1,2869 @@ +// +build linux,cgo + +package devmapper + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/dmesg" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/loopback" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/parsers/kernel" + units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool +) + +const ( + deviceSetMetaFile = "deviceset-metadata" + transactionMetaFile = "transaction-metadata" + xfs = "xfs" + ext4 = "ext4" + base = "base" +) + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. 
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about a list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataSize string + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 // min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status holds information about the device set. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback is the loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for metadata. + Metadata DiskUsage + // BaseDeviceSize is the base size of containers and images. + BaseDeviceSize uint64 + // BaseDeviceFS is the backing filesystem. + BaseDeviceFS string + // SectorSize is the sector size of the device. + SectorSize uint64 + // UdevSyncSupported is true if udev sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true if deferred removal is enabled. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// deviceMetadata is used to export image/container metadata in inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus holds information about a mounted device: its ID, size, and sector usage. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem.
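+ // (Presumably this reports devInfo.Size, the provisioned size in bytes of the thin device backing the filesystem.)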
+ Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system; this number is incremental. + TransactionID uint64 + // SizeInSectors indicates the size of the device in sectors. + SizeInSectors uint64 + // MappedSectors indicates the number of mapped sectors. + MappedSectors uint64 + // HighestMappedSector is the highest mapped sector. + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *devInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = base + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *devInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *devInfo) string { + file := info.Hash + if file == "" { + file = base + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of <size> bytes at the path +// <root>/devicemapper/<name>. +// If the file already exists and the new size is larger than its current size, it grows to the new size. +// Either way it returns the full path.
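+// For example, with the default 100 GiB loopback size, ensureImage("data", 100<<30) yields <root>/devicemapper/data as a sparse file: Truncate only sets the apparent size, and disk blocks are allocated lazily as data is written (on filesystems that support sparse files).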
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
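// Example: for deviceID 10, i = 2 and mask = ^0b00000100, so the AND below clears only bit 2 of deviceIDMap[1] and leaves the other seven IDs tracked by that byte untouched. +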
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == base { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
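+// (registerDevice runs inside an open transaction; if the process dies before closeTransaction, rollbackTransaction later removes the metadata file and frees the device ID.)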
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// xfsSupported checks if xfs is supported; it returns nil if it is, otherwise an error +func xfsSupported() error { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return err // error text is descriptive enough + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", xfs).Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return errors.Wrapf(err, "error checking for xfs support") + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return nil + } + } + + if err := s.Err(); err != nil { + return errors.Wrapf(err, "error checking for xfs support") + } + + return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) +} + +func determineDefaultFS() string { + err := xfsSupported() + if err == nil { + return xfs + } + + logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", err, ext4) + return ext4 +} + +// mkfsOptions tries to figure out whether some additional mkfs options are required +func mkfsOptions(fs string) []string { + if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) { + // For kernels earlier than 3.16 (and newer xfsutils), + // some xfs features need to be explicitly disabled. + return []string{"-m", "crc=0,finobt=0"} + } + + return []string{} +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + args := mkfsOptions(devices.filesystem) + args = append(args, devices.mkfsArgs...)
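+ // The device name goes last: mkfs.xfs and mkfs.ext4 both expect the target device as the final positional argument.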
+ args = append(args, devname) + + logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case xfs: + err = exec.Command("mkfs.xfs", args...).Run() + case ext4: + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. + if !devices.deferredDelete { + return + } + + // Cleanup right away if there are any leaked devices. 
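+ // (A "leaked" device is one whose Deleted flag was persisted but whose thin device was never actually removed, e.g. because deletion kept failing with EBUSY or the process died first.)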
Note this + // could cause some slowdown for process startup, if there were + // Leaked devices + devices.cleanupDeletedDevices() + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
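+ // (getNextFreeDeviceID marks the fresh ID used before returning it, so this retry loop cannot spin on the same ID.)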
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
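+ // ("nil" effectively means the empty string: metadata written by versions that predate the UUID field simply lacks it.)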
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/containers/storage/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == xfs { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256))) + } + + defer func() { + if err := mount.Unmount(fsMountPoint); err != nil { + logrus.Warnf("devmapper.growFS cleanup error: %v", err) + } + }() + + switch devices.BaseDeviceFilesystem { + case ext4: + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case xfs: + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
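+ // (This only applies to a user-supplied thin pool; a pool this driver creates itself starts out empty by construction.)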
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + fileInfos, _ := ioutil.ReadDir("/proc/self/fd") + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) + } + } + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
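+ // (I.e. the transaction-metadata file was never written; adopt the pool's transaction ID as the open one.)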
+ if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
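+ // (The deviceset-metadata file presumably carries fields such as NextDeviceID and the base device UUID; older versions never wrote it, hence the tolerance below.)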
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + // Kernel driver version >= 4.27.0 support deferred removal + + logrus.Debugf("devicemapper: kernel dm driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) + if err != nil { + return 0, 0, err + } + + dev := stat.Rdev + majorNum := major(uint64(dev)) + minorNum := minor(uint64(dev)) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
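+// For example, /dev/loop0 is conventionally major 7, minor 0 on Linux, so for a data file attached to it this returns ("/dev/loop0", 7, 0).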
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
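+ // (driverDeferredRemovalSupport was derived from the kernel dm driver version in determineDriverCapabilities; LibraryDeferredRemovalSupport reflects the libdevmapper this binary was built against.)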
+ if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + // Create the root dir of the devmapper driver with ownership matching this + // daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil { + return err + } + + prevSetupConfig, err := readLVMConfig(devices.root) + if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "storage-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the storage root dir + var st unix.Stat_t + if err := unix.Stat(devices.root, &st); err != nil { + return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) + } + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. + // container-maj,min[-inode] stands for: + // - Managed by container storage + // - The target of this device is at major <maj> and minor <min> + // - If <inode> is defined, use that file inside the device as a loopback image.
Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) + if err != nil { + return err + } + switch fsMagic { + case graphdriver.FsMagicAufs: + return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") + } + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + defer func() { + if retErr != nil { + err = devices.deactivatePool() + if err != nil { + logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) + } + } + }() + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. 
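+ // (loadThinPoolLoopBackInfo matches the pool's data/metadata major:minor numbers against the loop devices backing <root>/devicemapper/data and <root>/devicemapper/metadata.)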
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. 
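+ // (On failure the in-memory flag is reverted so the on-disk and in-memory views stay consistent.)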
+	if err := devices.saveMetadata(info); err != nil {
+		info.Deleted = false
+		return err
+	}
+
+	devices.nrDeletedDevices++
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) error {
+	err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
+	if err != nil {
+		// If syncDelete is true, we want to return error. If deferred
+		// deletion is not enabled, we return an error. If error is
+		// something other than EBUSY, return an error.
+		if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
+			logrus.Debugf("devmapper: Error deleting device: %s", err)
+			return err
+		}
+	}
+
+	if err == nil {
+		if err := devices.unregisterDevice(info.Hash); err != nil {
+			return err
+		}
+		// If device was already in deferred delete state that means
+		// deletion was being tried again later. Reduce the deleted
+		// device count.
+		if info.Deleted {
+			devices.nrDeletedDevices--
+		}
+		devices.markDeviceIDFree(info.DeviceID)
+	} else {
+		if err := devices.markForDeferredDeletion(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+	logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
+	defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
+	// This is a workaround for the kernel not discarding blocks in the
+	// thin pool when we remove a thinp device, so we do it manually.
+	// Even if device is deferred deleted, activate it and issue
+	// discards.
+	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+		return err
+	}
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.OpenCount != 0 {
+		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+		return nil
+	}
+
+	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+	}
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+	if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
+		logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID)
+		return err
+	}
+
+	defer devices.closeTransaction()
+
+	if devices.doBlkDiscard {
+		devices.issueDiscard(info)
+	}
+
+	// Try to deactivate device in case it is active.
+	// If deferred removal is enabled and deferred deletion is disabled
+	// then make sure device is removed synchronously. There have been
+	// some cases of device being busy for short duration and we would
+	// rather busy wait for device removal to take care of these cases.
+	deferredRemove := devices.deferredRemove
+	if !devices.deferredDelete {
+		deferredRemove = false
+	}
+
+	if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
+		logrus.Debugf("devmapper: Error deactivating device: %s", err)
+		return err
+	}
+
+	if err := devices.deleteDeviceNoLock(info, syncDelete); err != nil {
+		return err
+	}
+
+	return nil
+}
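+
+// Editorial sketch (not in the upstream source): the tolerate-vs-fail
+// decision implemented by deleteDeviceNoLock above, written out as a
+// predicate. A busy device is queued for deferred deletion only when
+// all three conditions hold:
+//
+//	func shouldDeferDeletion(syncDelete, deferredDelete bool, err error) bool {
+//		return !syncDelete && deferredDelete && errors.Cause(err) == devicemapper.ErrBusy
+//	}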
+// DeleteDevice returns success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+	logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
+	defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+	logrus.Debug("devmapper: deactivatePool() START")
+	defer logrus.Debug("devmapper: deactivatePool() END")
+	devname := devices.getPoolDevName()
+
+	devinfo, err := devicemapper.GetInfo(devname)
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+	if err := devicemapper.RemoveDevice(devname); err != nil {
+		return err
+	}
+
+	if d, err := devicemapper.GetDeps(devname); err == nil {
+		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+	}
+
+	return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+	return devices.deactivateDeviceMode(info, devices.deferredRemove)
+}
+
+func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
+	var err error
+	logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
+	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+
+	if deferredRemove {
+		err = devicemapper.RemoveDeviceDeferred(info.Name())
+	} else {
+		err = devices.removeDevice(info.Name())
+	}
+
+	// This function's semantics are such that it does not return an
+	// error if the device does not exist. So if the device went away by
+	// the time we actually tried to remove it, do not return an error.
+	if errors.Cause(err) != devicemapper.ErrEnxio {
+		return err
+	}
+	return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+	var err error
+
+	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+	for i := 0; i < 200; i++ {
+		err = devicemapper.RemoveDevice(devname)
+		if err == nil {
+			break
+		}
+		if errors.Cause(err) != devicemapper.ErrBusy {
+			return err
+		}
+
+		// If we see EBUSY it may be a transient error,
+		// sleep a bit and retry a few times.
+		devices.Unlock()
+		time.Sleep(100 * time.Millisecond)
+		devices.Lock()
+	}
+
+	return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+	if !devices.deferredRemove {
+		return nil
+	}
+
+	logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+
+	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo != nil && devinfo.DeferredRemove == 0 {
+		return nil
+	}
+
+	// Cancel deferred remove
+	if err := devices.cancelDeferredRemoval(info); err != nil {
+		// If the error is ErrEnxio, the device is probably already gone. Continue.
+		if errors.Cause(err) != devicemapper.ErrEnxio {
+			return err
+		}
+	}
+	return nil
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+	var err error
+
+	// Cancel deferred remove
+	for i := 0; i < 100; i++ {
+		err = devicemapper.CancelDeferredRemove(info.Name())
+		if err != nil {
+			if errors.Cause(err) == devicemapper.ErrBusy {
+				// If we see EBUSY it may be a transient error,
+				// sleep a bit and retry a few times.
+				devices.Unlock()
+				time.Sleep(100 * time.Millisecond)
+				devices.Lock()
+				continue
+			}
+		}
+		break
+	}
+	return err
+}
+
+func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
+		return
+	}
+
+	for _, d := range files {
+		if !d.IsDir() {
+			continue
+		}
+
+		name := d.Name()
+		fullname := path.Join(dir, name)
+
+		// We use MNT_DETACH here in case it is still busy in some running
+		// container. This means it'll go away from the global scope directly,
+		// and the device will be released when that container dies.
+		if err := mount.Unmount(fullname); err != nil {
+			logrus.Warnf("devmapper.Shutdown error: %s", err)
+		}
+
+		if devInfo, err := devices.lookupDevice(name); err != nil {
+			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err)
+		} else {
+			if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
+			}
+		}
+	}
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+	// Stop the deletion worker. This stops delivering new events to the
+	// ticker channel, which means no new instance of cleanupDeletedDevice()
+	// will run after this call. If one instance is already running at
+	// the time of the call, it must be holding devices.Lock() and
+	// we will block on this lock till the cleanup function exits.
+	devices.deletionWorkerTicker.Stop()
+
+	devices.Lock()
+	// Save DeviceSet Metadata first. Docker kills all threads if they
+	// don't finish in certain time. It is possible that Shutdown()
+	// routine does not finish in time as we loop trying to deactivate
+	// some devices while these are busy. In that case shutdown() routine
+	// will be killed and we will not get a chance to save deviceset
+	// metadata. Hence save this early before trying to deactivate devices.
+	devices.saveDeviceSetMetaData()
+	devices.unmountAndDeactivateAll(path.Join(home, "mnt"))
+	devices.Unlock()
+
+	info, _ := devices.lookupDeviceWithLock("")
+	if info != nil {
+		info.lock.Lock()
+		devices.Lock()
+		if err := devices.deactivateDevice(info); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err)
+		}
+		devices.Unlock()
+		info.lock.Unlock()
+	}
+
+	devices.Lock()
+	if devices.thinPoolDevice == "" {
+		if err := devices.deactivatePool(); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err)
+		}
+	}
+	devices.Unlock()
+
+	return nil
+}
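+
+// Editorial sketch (not in the upstream source): removeDevice and
+// cancelDeferredRemoval above share one pattern -- poll the libdm call,
+// treating EBUSY as transient and everything else as final. Extracted
+// as a helper it would look roughly like this (the real code also drops
+// and retakes devices.Lock() around the sleep, omitted here):
+//
+//	func retryWhileBusy(attempts int, fn func() error) error {
+//		var err error
+//		for i := 0; i < attempts; i++ {
+//			if err = fn(); errors.Cause(err) != devicemapper.ErrBusy {
+//				return err // nil, or a non-transient failure
+//			}
+//			time.Sleep(100 * time.Millisecond)
+//		}
+//		return err
+//	}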
+// Recent XFS changes allow changing the behavior of the filesystem in
+// case of errors. When the thin pool gets full and XFS gets an ENOSPC
+// error, it currently retries the IO infinitely, which can block the
+// container process so that it cannot be killed. With a value of 0,
+// XFS will not retry upon error and will instead shut down the
+// filesystem.
+func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
+	dmDevicePath, err := os.Readlink(info.DevName())
+	if err != nil {
+		return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
+	}
+
+	dmDeviceName := path.Base(dmDevicePath)
+	filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
+	maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
+	if err != nil {
+		return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
+	}
+	defer maxRetriesFile.Close()
+
+	// Write the user-specified retry count (0 disables retries)
+	_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
+	if err != nil {
+		return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
+	}
+	return nil
+}
+
+// MountDevice mounts the device if not already mounted.
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	if info.Deleted {
+		return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+		return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
+	}
+
+	fstype, err := ProbeFsType(info.DevName())
+	if err != nil {
+		return err
+	}
+
+	options := ""
+
+	if fstype == xfs {
+		// XFS needs nouuid or it can't mount two filesystems with the same UUID
+		options = joinMountOptions(options, "nouuid")
+	}
+
+	mountOptions := devices.mountOptions
+	if len(moptions.Options) > 0 {
+		addNouuid := strings.Contains(mountOptions, "nouuid")
+		mountOptions = strings.Join(moptions.Options, ",")
+		if addNouuid {
+			mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+		}
+	}
+
+	options = joinMountOptions(options, mountOptions)
+	options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
+
+	if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
+		return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
+	}
+
+	if fstype == xfs && devices.xfsNospaceRetries != "" {
+		if err := devices.xfsSetNospaceRetries(info); err != nil {
+			if err := mount.Unmount(path); err != nil {
+				logrus.Warnf("devmapper.MountDevice cleanup error: %v", err)
+			}
+			devices.deactivateDevice(info)
+			return err
+		}
+	}
+
+	return nil
+}
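+
+// Editorial sketch of the option assembly above. joinMountOptions (the
+// helper defined in mount.go later in this patch) simply comma-joins
+// non-empty fragments:
+//
+//	joinMountOptions("", "nouuid")            // "nouuid"
+//	joinMountOptions("nouuid", "ro,discard")  // "nouuid,ro,discard"
+//	joinMountOptions("nouuid,ro,discard", "") // unchanged
+
+// UnmountDevice unmounts the device and removes it from hash.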
+func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := mount.Unmount(mountPath); err != nil { + if ok, _ := Mounted(mountPath); ok { + return err + } + } + logrus.Debug("devmapper: Unmount done") + + // Remove the mountpoint here. Removing the mountpoint (in newer kernels) + // will cause all other instances of this mount in other mount namespaces + // to be killed (this is an anti-DoS measure that is necessary for things + // like devicemapper). This is necessary to avoid cases where a libdm mount + // that is present in another namespace will cause subsequent RemoveDevice + // operations to fail. We ignore any errors here because this may fail on + // older kernels which don't have + // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. + if err := os.Remove(mountPath); err != nil { + logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err) + } + + return devices.deactivateDevice(info) +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// 
regardless of loopback or block device
+func (devices *DeviceSet) DataDevicePath() string {
+	return devices.dataDevice
+}
+
+// MetadataDevicePath returns the path to the metadata storage for this deviceset,
+// regardless of loopback or block device
+func (devices *DeviceSet) MetadataDevicePath() string {
+	return devices.metadataDevice
+}
+
+func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
+	buf := new(unix.Statfs_t)
+	if err := unix.Statfs(loopFile, buf); err != nil {
+		logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
+		return 0, err
+	}
+	return buf.Bfree * uint64(buf.Bsize), nil
+}
+
+func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) {
+	if loopFile != "" {
+		fi, err := os.Stat(loopFile)
+		if err != nil {
+			logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err)
+			return false, err
+		}
+		return fi.Mode().IsRegular(), nil
+	}
+	return false, nil
+}
+
+// Status returns the current status of this deviceset
+func (devices *DeviceSet) Status() *Status {
+	devices.Lock()
+	defer devices.Unlock()
+
+	status := &Status{}
+
+	status.PoolName = devices.getPoolName()
+	status.DataFile = devices.DataDevicePath()
+	status.DataLoopback = devices.dataLoopFile
+	status.MetadataFile = devices.MetadataDevicePath()
+	status.MetadataLoopback = devices.metadataLoopFile
+	status.UdevSyncSupported = devicemapper.UdevSyncSupported()
+	status.DeferredRemoveEnabled = devices.deferredRemove
+	status.DeferredDeleteEnabled = devices.deferredDelete
+	status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
+	status.BaseDeviceSize = devices.getBaseDeviceSize()
+	status.BaseDeviceFS = devices.getBaseDeviceFS()
+
+	totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+	if err == nil {
+		// Convert from blocks to bytes
+		blockSizeInSectors := totalSizeInSectors / dataTotal
+
+		status.Data.Used = dataUsed * blockSizeInSectors * 512
+		status.Data.Total = dataTotal * blockSizeInSectors * 512
+		status.Data.Available = status.Data.Total - status.Data.Used
+
+		// metadata blocks are always 4k
+		status.Metadata.Used = metadataUsed * 4096
+		status.Metadata.Total = metadataTotal * 4096
+		status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+		status.SectorSize = blockSizeInSectors * 512
+
+		if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+			if err == nil && actualSpace < status.Data.Available {
+				status.Data.Available = actualSpace
+			}
+		}
+
+		if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+			if err == nil && actualSpace < status.Metadata.Available {
+				status.Metadata.Available = actualSpace
+			}
+		}
+
+		minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+		status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
+	}
+
+	return status
+}
+
+// exportDeviceMetadata returns the device metadata (ID, size, and name)
+// for the device with the given hash.
+func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return nil, err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
+	return metadata, nil
+}
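+
+// Editorial worked example (assumed numbers, not from the source): with
+// the default 64 KiB thin-pool block size (128 sectors), poolStatus
+// might report totalSizeInSectors=104857600 (50 GiB), dataTotal=819200
+// blocks and dataUsed=204800 blocks. Status() then derives:
+//
+//	blockSizeInSectors := uint64(104857600) / 819200 // 128 sectors = 64 KiB
+//	used := uint64(204800) * 128 * 512               // 13421772800 bytes = 12.5 GiB
+//	total := uint64(819200) * 128 * 512              // 53687091200 bytes = 50 GiB
+
+// NewDeviceSet creates the device set based on the options provided.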
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig + testMode := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != ext4 && val != xfs { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt", "devicemapper.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.metadata_size": + devices.metaDataSize = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.metaDataSize": + lvmSetupConfig.MetaDataSize = val + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if 
minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + case "dm.libdm_log_level": + level, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + } + if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { + return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + } + // Register a new logging callback with the specified level. 
+ devicemapper.LogInit(devicemapper.DefaultLogger{ + Level: int(level), + }) + case "test": + testMode, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("devmapper: Unknown option %s", key) + } + } + + if !testMode { + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + } + + devices.lvmSetupConfig = lvmSetupConfig + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go new file mode 100644 index 00000000000..418b9e61087 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -0,0 +1,108 @@ +// +build linux,cgo + +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go new file mode 100644 index 00000000000..d2f165e26de --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -0,0 +1,273 @@ +// +build linux,cgo + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + "github.com/containers/storage/pkg/mount" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + locker *locker.Locker +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), + } + + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. 
+func (d *Driver) Status() [][2]string {
+	s := d.DeviceSet.Status()
+
+	status := [][2]string{
+		{"Pool Name", s.PoolName},
+		{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
+		{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
+		{"Backing Filesystem", s.BaseDeviceFS},
+		{"Data file", s.DataFile},
+		{"Metadata file", s.MetadataFile},
+		{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
+		{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
+		{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
+		{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
+		{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
+		{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
+		{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
+		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
+		{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
+		{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
+		{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
+	}
+	if len(s.DataLoopback) > 0 {
+		status = append(status, [2]string{"Data loop file", s.DataLoopback})
+	}
+	if len(s.MetadataLoopback) > 0 {
+		status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
+	}
+	if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
+		status = append(status, [2]string{"Library Version", vStr})
+	}
+	return status
+}
+
+// Metadata returns a map of information about the device.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+	m, err := d.DeviceSet.exportDeviceMetadata(id)
+
+	if err != nil {
+		return nil, err
+	}
+
+	metadata := make(map[string]string)
+	metadata["DeviceId"] = strconv.Itoa(m.deviceID)
+	metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
+	metadata["DeviceName"] = m.deviceName
+	return metadata, nil
+}
+
+// Cleanup shuts down the device set and unmounts the driver's home directory.
+func (d *Driver) Cleanup() error {
+	err := d.DeviceSet.Shutdown(d.home)
+
+	umountErr := mount.Unmount(d.home)
+	// in case we have two errors, prefer the one from Shutdown()
+	if err != nil {
+		return err
+	}
+
+	return umountErr
+}
+
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	return d.Create(id, template, opts)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
+}
+
+// Create adds a device with a given id and the parent.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	var storageOpt map[string]string
+	if opts != nil {
+		storageOpt = opts.StorageOpt
+	}
+
+	if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
+		return err
+	}
+
+	return nil
+}
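+
+// Editorial sketch (illustrative values, not from the source): the map
+// returned by Metadata for a devmapper layer might look like
+//
+//	map[string]string{
+//		"DeviceId":   "24",
+//		"DeviceSize": "10737418240", // bytes
+//		"DeviceName": "container-8:1-1055230-3f2a...", // devicePrefix + "-" + layer hash
+//	}
+
+// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point.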
+func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return fmt.Errorf("failed to remove device %s: %v", id, err) + } + + // Most probably the mount point is already removed on Put() + // (see DeviceSet.UnmountDevice()), but just in case it was not + // let's try to remove it here as well, ignoring errors as + // an older kernel can return EBUSY if e.g. the mount was leaked + // to other mount namespaces. A failure to remove the container's + // mount point is not important and should not be treated + // as a failure to remove the container. + mp := path.Join(d.home, "mnt", id) + err := unix.Rmdir(mp) + if err != nil && !os.IsNotExist(err) { + logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err) + } + + return err +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For devmapper, it queries the mnt path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + return directory.Usage(path.Join(d.home, "mnt", id)) +} + +// Exists checks to see if the device exists. 
+func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go new file mode 100644 index 00000000000..54db6ab4aea --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go @@ -0,0 +1,5 @@ +package devmapper + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go new file mode 100644 index 00000000000..41e73faf525 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -0,0 +1,88 @@ +// +build linux,cgo + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + var mntpointSt unix.Stat_t + if err := unix.Stat(mountpoint, &mntpointSt); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + var parentSt unix.Stat_t + if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { + return false, err + } + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go new file mode 100644 index 00000000000..770b431bdd3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -0,0 +1,408 @@ +package graphdriver + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + digest "github.com/opencontainers/go-digest" +) + +// FsMagic unsigned id of the filesystem in use. 
+type FsMagic uint32
+
+const (
+	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	// All registered drivers
+	drivers map[string]InitFunc
+
+	// ErrNotSupported returned when driver is not supported.
+	ErrNotSupported = errors.New("driver not supported")
+	// ErrPrerequisites returned when driver does not meet prerequisites.
+	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	// ErrIncompatibleFS returned when file system is not supported.
+	ErrIncompatibleFS = errors.New("backing file system is unsupported for this graph driver")
+	// ErrLayerUnknown returned when the specified layer is unknown by the driver.
+	ErrLayerUnknown = errors.New("unknown layer")
+)
+
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// methods.
+type CreateOpts struct {
+	MountLabel string
+	StorageOpt map[string]string
+	*idtools.IDMappings
+	ignoreChownErrors bool
+}
+
+// MountOpts contains optional arguments for LayerStore.Mount() methods.
+type MountOpts struct {
+	// Mount label is the MAC Labels to assign to mount point (SELINUX)
+	MountLabel string
+	// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
+	UidMaps []idtools.IDMap // nolint: golint
+	GidMaps []idtools.IDMap // nolint: golint
+	Options []string
+
+	// Volatile specifies whether the container storage can be optimized
+	// at the cost of not syncing all the dirty files in memory.
+	Volatile bool
+
+	// DisableShifting forces the driver to not do any ID shifting at runtime.
+	DisableShifting bool
+}
+
+// ApplyDiffOpts contains optional arguments for ApplyDiff methods.
+type ApplyDiffOpts struct {
+	Diff              io.Reader
+	Mappings          *idtools.IDMappings
+	MountLabel        string
+	IgnoreChownErrors bool
+	ForceMask         *os.FileMode
+}
+
+// InitFunc initializes the storage driver.
+type InitFunc func(homedir string, options Options) (Driver, error)
+
+// ProtoDriver defines the basic capabilities of a driver.
+// This interface exists solely to be a minimum set of methods
+// for client code that chooses not to implement the entire Driver
+// interface and uses the NaiveDiffDriver wrapper constructor.
+//
+// Use of ProtoDriver directly by client code is not recommended.
+type ProtoDriver interface {
+	// String returns a string representation of this driver.
+	String() string
+	// CreateReadWrite creates a new, empty filesystem layer that is ready
+	// to be used as the storage for a container. Additional options can
+	// be passed in opts. parent may be "" and opts may be nil.
+	CreateReadWrite(id, parent string, opts *CreateOpts) error
+	// Create creates a new, empty, filesystem layer with the
+	// specified id and parent and options passed in opts. Parent
+	// may be "" and opts may be nil.
+	Create(id, parent string, opts *CreateOpts) error
+	// CreateFromTemplate creates a new filesystem layer with the specified id
+	// and parent, with contents identical to the specified template layer.
+	CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error
+	// Remove attempts to remove the filesystem layer with this id.
+	Remove(id string) error
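+
+	// Editorial sketch (hypothetical caller-side values): the option
+	// structs above are how callers request per-layer settings, e.g. a
+	// 20 GB devmapper layer and a labelled, hardened mount:
+	//
+	//	createOpts := &CreateOpts{StorageOpt: map[string]string{"size": "20G"}}
+	//	mountOpts := MountOpts{
+	//		MountLabel: "system_u:object_r:container_file_t:s0",
+	//		Options:    []string{"nodev", "nosuid"},
+	//	}
+
+	// Get returns the mountpoint for the layered filesystem referred
+	// to by this id. You can optionally specify a mountLabel or "".
+	// Optionally it gets the mappings used to create the layer.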
+	// Returns the absolute path to the mounted layered filesystem.
+	Get(id string, options MountOpts) (dir string, err error)
+	// Put releases the system resources for the specified id,
+	// e.g., unmounting the layered filesystem.
+	Put(id string) error
+	// Exists returns whether a filesystem layer with the specified
+	// ID exists on this driver.
+	Exists(id string) bool
+	// Status returns a set of key-value pairs which give low
+	// level diagnostic status about this driver.
+	Status() [][2]string
+	// Metadata returns a set of key-value pairs which give low level
+	// information about the image/container the driver is managing.
+	Metadata(id string) (map[string]string, error)
+	// ReadWriteDiskUsage returns the disk usage of the writable directory for the specified ID.
+	ReadWriteDiskUsage(id string) (*directory.DiskUsage, error)
+	// Cleanup performs necessary tasks to release resources
+	// held by the driver, e.g., unmounting all layered filesystems
+	// known to this driver.
+	Cleanup() error
+	// AdditionalImageStores returns additional image stores supported by the driver
+	// This API is experimental and can be changed without bumping the major version number.
+	AdditionalImageStores() []string
+}
+
+// DiffDriver is the interface to use to implement graph diffs
+type DiffDriver interface {
+	// Diff produces an archive of the changes between the specified
+	// layer and its parent layer which may be "".
+	Diff(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
+	// Changes produces a list of changes between the specified layer
+	// and its parent layer. If parent is "", then all changes will be ADD changes.
+	Changes(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
+	// ApplyDiff extracts the changeset from the given diff into the
+	// layer with the specified id and parent, returning the size of the
+	// new layer in bytes.
+	// The io.Reader must be an uncompressed stream.
+	ApplyDiff(id string, parent string, options ApplyDiffOpts) (size int64, err error)
+	// DiffSize calculates the changes between the specified id
+	// and its parent and returns the size in bytes of the changes
+	// relative to its base filesystem directory.
+	DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
+}
+
+// LayerIDMapUpdater is the interface that implements ID map changes for layers.
+type LayerIDMapUpdater interface {
+	// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership
+	// information using the toContainer and toHost mappings, using them to replace
+	// on-disk owner UIDs and GIDs which are "host" values in the first map with
+	// UIDs and GIDs for "host" values from the second map which correspond to the
+	// same "container" IDs. This method should only be called after a layer is
+	// first created and populated, and before it is mounted, as other changes made
+	// relative to a parent layer, but before this method is called, may be discarded
+	// by Diff().
+	UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error
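+
+	// Editorial sketch (hypothetical IDs and variable names): remapping
+	// a freshly populated layer so that container UID/GID 0 is stored on
+	// disk as 100000 might look like:
+	//
+	//	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
+	//	mappings := idtools.NewIDMappingsFromMaps(maps, maps)
+	//	err := drv.UpdateLayerIDMap(layerID, mappings, nil, "")
+
+	// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs
+	// in an image, so that it is not required to chown the files when running in
+	// a user namespace.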
+	SupportsShifting() bool
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+	ProtoDriver
+	DiffDriver
+	LayerIDMapUpdater
+}
+
+// DriverWithDifferOutput is the result of ApplyDiffWithDiffer
+// This API is experimental and can be changed without bumping the major version number.
+type DriverWithDifferOutput struct {
+	Differ             Differ
+	Target             string
+	Size               int64
+	UIDs               []uint32
+	GIDs               []uint32
+	UncompressedDigest digest.Digest
+	Metadata           string
+	BigData            map[string][]byte
+}
+
+// Differ defines the interface for using a custom differ.
+// This API is experimental and can be changed without bumping the major version number.
+type Differ interface {
+	ApplyDiff(dest string, options *archive.TarOptions) (DriverWithDifferOutput, error)
+}
+
+// DriverWithDiffer is the interface for direct diff access.
+// This API is experimental and can be changed without bumping the major version number.
+type DriverWithDiffer interface {
+	Driver
+	// ApplyDiffWithDiffer applies the changes using the callback function.
+	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
+	// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
+	ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
+	// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
+	CleanupStagingDirectory(stagingDirectory string) error
+	// DifferTarget gets the location where files are stored for the layer.
+	DifferTarget(id string) (string, error)
+}
+
+// Capabilities defines a list of capabilities a driver may implement.
+// These capabilities are not required; however, they do determine how a
+// graphdriver can be used.
+type Capabilities struct {
+	// Flags that this driver is capable of reproducing exactly equivalent
+	// diffs for read-only layers. If set, clients can rely on the driver
+	// for consistent tar streams, and avoid extra processing to account
+	// for potential differences (eg: the layer store's use of tar-split).
+	ReproducesExactDiffs bool
+}
+
+// CapabilityDriver is the interface for layered file system drivers that
+// can report on their Capabilities.
+type CapabilityDriver interface {
+	Capabilities() Capabilities
+}
+
+// AdditionalLayer represents a layer that is stored in the additional layer store
+// This API is experimental and can be changed without bumping the major version number.
+type AdditionalLayer interface {
+	// CreateAs creates a new layer from this additional layer
+	CreateAs(id, parent string) error
+
+	// Info returns arbitrary information stored along with this layer (i.e. `info` file)
+	Info() (io.ReadCloser, error)
+
+	// Blob returns a reader of the raw contents of this layer.
+	Blob() (io.ReadCloser, error)
+
+	// Release tells the additional layer store that we don't use this handler.
+	Release()
+}
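+
+// Editorial sketch: the optional interfaces above are discovered at run
+// time by type assertion; e.g. a caller that wants to know whether a
+// driver produces reproducible diffs (hypothetical variable names):
+//
+//	if capDrv, ok := drv.(CapabilityDriver); ok && capDrv.Capabilities().ReproducesExactDiffs {
+//		// the layer store can skip its tar-split bookkeeping
+//	}
+
+// AdditionalLayerStoreDriver is the interface for a driver that supports
+// additional layer store functionality.
+// This API is experimental and can be changed without bumping the major version number.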
+type AdditionalLayerStoreDriver interface {
+	Driver
+
+	// LookupAdditionalLayer looks up the additional layer store by the specified
+	// digest and ref and returns an object representing that layer.
+	LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
+
+	// LookupAdditionalLayerByID looks up the additional layer store by the specified
+	// ID and returns an object representing that layer.
+	LookupAdditionalLayerByID(id string) (AdditionalLayer, error)
+}
+
+// DiffGetterDriver is the interface for layered file system drivers that
+// provide a specialized function for getting file contents for tar-split.
+type DiffGetterDriver interface {
+	Driver
+	// DiffGetter returns an interface to efficiently retrieve the contents
+	// of files in a layer.
+	DiffGetter(id string) (FileGetCloser, error)
+}
+
+// FileGetCloser extends the storage.FileGetter interface with a Close method
+// for cleaning up.
+type FileGetCloser interface {
+	storage.FileGetter
+	// Close cleans up any resources associated with the FileGetCloser.
+	Close() error
+}
+
+// Checker makes checks on specified filesystems.
+type Checker interface {
+	// IsMounted returns true if the provided path is mounted for the specific checker
+	IsMounted(path string) bool
+}
+
+func init() {
+	drivers = make(map[string]InitFunc)
+}
+
+// Register registers an InitFunc for the driver.
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := drivers[name]; exists {
+		return fmt.Errorf("Name already registered %s", name)
+	}
+	drivers[name] = initFunc
+
+	return nil
+}
+
+// GetDriver initializes and returns the registered driver
+func GetDriver(name string, config Options) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(config.Root, name), config)
+	}
+
+	logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
+	return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
+}
+
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
+func getBuiltinDriver(name, home string, options Options) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(home, name), options)
+	}
+	logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
+	return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
+}
+
+// Options is used to initialize a graphdriver
+type Options struct {
+	Root                string
+	RunRoot             string
+	DriverOptions       []string
+	UIDMaps             []idtools.IDMap
+	GIDMaps             []idtools.IDMap
+	ExperimentalEnabled bool
+}
+
+// New creates the driver and initializes it at the specified root.
+func New(name string, config Options) (Driver, error) {
+	if name != "" {
+		logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
+		return GetDriver(name, config)
+	}
+
+	// Guess for prior driver
+	driversMap := scanPriorDrivers(config.Root)
+	for _, name := range priority {
+		if name == "vfs" {
+			// don't use vfs even if there is state present.
+			continue
+		}
+		if _, prior := driversMap[name]; prior {
+			// of the state found from prior drivers, check in order of our priority
+			// which we would prefer
+			driver, err := getBuiltinDriver(name, config.Root, config)
+			if err != nil {
+				// unlike below, we will return error here, because there is prior
+				// state, and now it is no longer supported/prereq/compatible, so
+				// something changed and needs attention.
Otherwise the daemon's
+				// images would just "disappear".
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please clean up or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range priority {
+		driver, err := getBuiltinDriver(name, config.Root, config)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	cause := errors.Cause(err)
+	return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
new file mode 100644
index 00000000000..e1320ee07f6
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -0,0 +1,21 @@
+package graphdriver
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
new file mode 100644
index 00000000000..7c527d279a6
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -0,0 +1,194 @@
+//go:build linux
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/mount"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem id for Jffs2Fs
+	FsMagicJffs2Fs = FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+	// FsMagicFUSE filesystem id for FUSE
+	FsMagicFUSE = FsMagic(0x65735546)
+	// FsMagicAcfs filesystem id for Acfs
+	FsMagicAcfs = FsMagic(0x61636673)
+	// FsMagicAfs filesystem id for Afs
+	FsMagicAfs = FsMagic(0x5346414f)
+	// FsMagicCephFs filesystem id for Ceph
+	FsMagicCephFs = FsMagic(0x00C36400)
+	// FsMagicCIFS filesystem id for CIFS
+	FsMagicCIFS = FsMagic(0xFF534D42)
+	// FsMagicFHGFSFs filesystem id for FHGFS
+	FsMagicFHGFSFs = FsMagic(0x19830326)
+	// FsMagicIBRIX filesystem id for IBRIX
+	FsMagicIBRIX = FsMagic(0x013111A8)
+	// FsMagicKAFS filesystem id for KAFS
+	FsMagicKAFS = FsMagic(0x6B414653)
+	// FsMagicLUSTRE filesystem id for LUSTRE
+	FsMagicLUSTRE = FsMagic(0x0BD00BD0)
+	// FsMagicNCP filesystem id for NCP
+	FsMagicNCP = FsMagic(0x564C)
+	// FsMagicNFSD filesystem id for NFSD
+	FsMagicNFSD = FsMagic(0x6E667364)
+	// FsMagicOCFS2 filesystem id for OCFS2
+	FsMagicOCFS2 = FsMagic(0x7461636F)
+	// FsMagicPANFS filesystem id for PANFS
+	FsMagicPANFS = FsMagic(0xAAD7AAEA)
+	// FsMagicPRLFS filesystem id for PRLFS
+	FsMagicPRLFS = FsMagic(0x7C7C6673)
+	// FsMagicSMB2 filesystem id for SMB2
+	FsMagicSMB2 = FsMagic(0xFE534D42)
+	// FsMagicSNFS filesystem id for SNFS
+	FsMagicSNFS = FsMagic(0xBEEFDEAD)
+	// FsMagicVBOXSF filesystem id for VBOXSF
+	FsMagicVBOXSF = FsMagic(0x786F4256)
+	// FsMagicVXFS filesystem id for VXFS
+	FsMagicVXFS = FsMagic(0xA501FCF5)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"overlay",
+		// We don't support devicemapper without configuration
+		// "devicemapper",
+		"aufs",
+		"btrfs",
+		"zfs",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicEcryptfs:    "ecryptfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
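+// Editor's note (illustrative, not part of upstream): the returned magic can
+// be mapped to a human-readable name via FsNames, e.g.
+//
+//	magic, err := GetFSMagic("/var/lib/containers/storage/overlay") // path is hypothetical
+//	if err == nil {
+//		name := FsNames[magic] // e.g. "xfs" or "extfs"
+//		_ = name
+//	}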
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// isMountPoint checks that the given path is a mount point
+func isMountPoint(mountPath string) (bool, error) {
+	// it is already the root
+	if mountPath == "/" {
+		return true, nil
+	}
+
+	var s1, s2 unix.Stat_t
+	if err := unix.Stat(mountPath, &s1); err != nil {
+		return true, err
+	}
+	if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil {
+		return true, err
+	}
+	return s1.Dev != s2.Dev, nil
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	if FsMagic(buf.Type) != fsType {
+		return false, nil
+	}
+	return isMountPoint(mountPath)
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
new file mode 100644
index 00000000000..174fa9670bf
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -0,0 +1,96 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/mount"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
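+//
+// Editor's sketch (illustrative, not upstream code): both checker flavors
+// satisfy the same Checker interface and are used the same way, e.g.
+//
+//	checker := NewFsChecker(FsMagicZfs)
+//	mounted := checker.IsMounted("/var/lib/containers/storage") // path is hypothetical
+//	_ = mounted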
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+// Solaris supports only ZFS for now
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	defer C.free(unsafe.Pointer(cs))
+	buf := C.getstatfs(cs)
+	defer C.free(unsafe.Pointer(buf))
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		return false, ErrPrerequisites
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
new file mode 100644
index 00000000000..4a875608b0d
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"unsupported",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go
new file mode 100644
index 00000000000..ffd30c2950c
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_windows.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"windowsfilter",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	// Note it is OK to return FsMagicUnsupported on Windows.
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
new file mode 100644
index 00000000000..b7e681ace45
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -0,0 +1,220 @@
+package graphdriver
+
+import (
+	"io"
+	"time"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ApplyUncompressedLayer defines the unpack method used by the graph
+	// driver.
+	ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
+)
+
+// NaiveDiffDriver takes a ProtoDriver and adds the
+// capability of the Diffing methods which it may or may not
+// support on its own. See the comment on the exported
+// NewNaiveDiffDriver function below.
+// Notably, the AUFS driver doesn't need to be wrapped like this.
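+//
+// Editor's sketch (illustrative, not upstream code): a driver without native
+// diff support is wrapped once at construction time, e.g.
+//
+//	var proto ProtoDriver         // e.g. the vfs driver
+//	var updater LayerIDMapUpdater
+//	d := NewNaiveDiffDriver(proto, updater)
+//	_ = d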
+type NaiveDiffDriver struct { + ProtoDriver + LayerIDMapUpdater +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.TarWithOptions(layerFs, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + options.Options = append(options.Options, "ro") + parentFs, err := driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, idMappings.UIDs(), idMappings.GIDs()) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. 
+func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + options := MountOpts{ + MountLabel: mountLabel, + Options: []string{"ro"}, + } + parentFs, err = driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) { + driver := gdw.ProtoDriver + + if options.Mappings == nil { + options.Mappings = &idtools.IDMappings{} + } + + // Mount the root filesystem so we can apply the diff/layer. + mountOpts := MountOpts{ + MountLabel: options.MountLabel, + } + layerFs, err := driver.Get(id, mountOpts) + if err != nil { + return + } + defer driver.Put(id) + + tarOptions := &archive.TarOptions{ + InUserNS: userns.RunningInUserNS(), + IgnoreChownErrors: options.IgnoreChownErrors, + } + if options.Mappings != nil { + tarOptions.UIDMaps = options.Mappings.UIDs() + tarOptions.GIDMaps = options.Mappings.GIDs() + } + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil { + logrus.Errorf("While applying layer: %s", err) + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
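+// Editor's note (illustrative, not upstream): this is the naive fallback
+// path; it computes the change list via Changes, mounts the layer, and sums
+// the result with archive.ChangesSize, so it can be slow on large layers.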
+func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	if idMappings == nil {
+		idMappings = &idtools.IDMappings{}
+	}
+	if parentMappings == nil {
+		parentMappings = &idtools.IDMappings{}
+	}
+
+	changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel)
+	if err != nil {
+		return
+	}
+
+	options := MountOpts{
+		MountLabel: mountLabel,
+	}
+	layerFs, err := driver.Get(id, options)
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/jsoniter.go b/vendor/github.com/containers/storage/drivers/jsoniter.go
new file mode 100644
index 00000000000..097f923ab5b
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/jsoniter.go
@@ -0,0 +1,5 @@
+package graphdriver
+
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
new file mode 100644
index 00000000000..44b3515a854
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -0,0 +1,220 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"syscall"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// doesSupportNativeDiff checks whether the filesystem has a bug
+// which copies up the opaque flag when copying up an opaque
+// directory, or whether the kernel enables CONFIG_OVERLAY_FS_REDIRECT_DIR.
+// When either is the case, naive diff should be used.
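+//
+// Editor's summary (illustrative, not upstream): the probe mounts a
+// three-layer overlay (l1, l2 with an opaque "d", l3 as upper), copies up
+// "d", and checks that the opaque xattr did not leak; it then renames a
+// directory and checks for a "redirect" xattr. Either symptom means native
+// overlay diffs would be wrong, so the naive diff path is used instead.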
+func doesSupportNativeDiff(d, mountOpts string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l1/d1, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s" + if unshare.IsRootless() { + mountFlags = mountFlags + ",userxattr" + } + + opts := fmt.Sprintf(mountFlags, path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque")) + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return errors.Wrap(err, "failed to rename dir in merged directory") + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect")) + if err != nil { + return errors.Wrap(err, "failed to read redirect flag on upper layer") + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + + return nil +} + +// doesMetacopy checks if the filesystem is going to optimize changes to +// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid +// copying up a file from a lower layer unless/until its contents are being +// modified +func doesMetacopy(d, mountOpts string) (bool, error) { + td, err := ioutil.TempDir(d, "metacopy-check") 
+ if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1, l2, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil { + return false, err + } + if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work")) + if unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + if errors.Cause(err) == unix.EINVAL { + logrus.Info("metacopy option not supported on this kernel", mountOpts) + return false, nil + } + return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts) + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + // Make a change that only impacts the inode, and check if the pulled-up copy is marked + // as a metadata-only copy + if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { + return false, errors.Wrap(err, "error changing permissions on file for metacopy check") + } + metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) + if err != nil { + if errors.Is(err, unix.ENOTSUP) { + logrus.Info("metacopy option not supported") + return false, nil + } + return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer") + } + return metacopy != nil, nil +} + +// doesVolatile checks if the filesystem supports the "volatile" mount option +func doesVolatile(d string) (bool, error) { + td, err := ioutil.TempDir(d, "volatile-check") + if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return false, errors.Wrapf(err, "failed to mount overlay for volatile check") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", 
filepath.Join(td, "merged"), err) + } + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go new file mode 100644 index 00000000000..9ad1b863d8d --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check_115.go @@ -0,0 +1,42 @@ +// +build !go1.16 + +package overlay + +import ( + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/system" +) + +func scanForMountProgramIndicators(home string) (detected bool, err error) { + err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error { + if detected { + return filepath.SkipDir + } + if err != nil { + return err + } + basename := filepath.Base(path) + if strings.HasPrefix(basename, archive.WhiteoutPrefix) { + detected = true + return filepath.SkipDir + } + if info.IsDir() { + xattrs, err := system.Llistxattr(path) + if err != nil { + return err + } + for _, xattr := range xattrs { + if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { + detected = true + return filepath.SkipDir + } + } + } + return nil + }) + return detected, err +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go new file mode 100644 index 00000000000..6d7913cbfab --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -0,0 +1,42 @@ +// +build go1.16 + +package overlay + +import ( + "io/fs" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/system" +) + +func scanForMountProgramIndicators(home string) (detected bool, err error) { + err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error { + if detected { + return fs.SkipDir + } + if err != nil { + return err + } + basename := filepath.Base(path) + if strings.HasPrefix(basename, archive.WhiteoutPrefix) { + detected = true + return fs.SkipDir + } + if d.IsDir() { + xattrs, err := system.Llistxattr(path) + if err != nil { + return err + } + for _, xattr := range xattrs { + if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { + detected = true + return fs.SkipDir + } + } + } + return nil + }) + return detected, err +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go new file mode 100644 index 00000000000..2a1e9d0cc1a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go @@ -0,0 +1,5 @@ +package overlay + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go new file mode 100644 index 00000000000..7c8fd50a3a5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay + +import ( + "bytes" + "flag" + "fmt" + "os" + "runtime" + + "github.com/containers/storage/pkg/reexec" + "golang.org/x/sys/unix" +) + +func init() { + reexec.Register("storage-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions 
struct {
+	Device string
+	Target string
+	Type   string
+	Label  string
+	Flag   uint32
+}
+
+func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
+	options := &mountOptions{
+		Device: device,
+		Target: target,
+		Type:   mType,
+		Flag:   uint32(flags),
+		Label:  label,
+	}
+
+	cmd := reexec.Command("storage-mountfrom", dir)
+	w, err := cmd.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+	}
+
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+	if err := cmd.Start(); err != nil {
+		w.Close()
+		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+	}
+	// write the options to the pipe for the re-exec'd mount process to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		w.Close()
+		return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
+	}
+	return nil
+}
+
+// mountFromMain is the entry-point for storage-mountfrom on re-exec.
+func mountFromMain() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *mountOptions
+
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	if err := os.Chdir(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
new file mode 100644
index 00000000000..b22f9dfb206
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -0,0 +1,2103 @@
+// +build linux
+
+package overlay
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/drivers/overlayutils"
+	"github.com/containers/storage/drivers/quota"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/fsutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/locker"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	units "github.com/docker/go-units"
+	"github.com/hashicorp/go-multierror"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/opencontainers/selinux/go-selinux"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// untar defines the untar method
+	untar = chrootarchive.UntarUncompressed
+)
+
+const (
+	defaultPerms     = os.FileMode(0555)
+	selinuxLabelTest = "system_u:object_r:container_file_t:s0"
+)
+
+// This backend uses the overlay union filesystem for containers
+// with diff directories for each layer.
+
+// This version of the overlay driver requires at least kernel
+// 4.0.0 in order to support mounting multiple diff directories.
+
+// Each container/image has at least a "diff" directory and "link" file.
+// If there are diff layers below, there is also a "lower" file, as well
+// as "merged" and "work" directories. The "diff" directory
+// has the upper layer of the overlay and is used to capture any
+// changes to the layer. The "lower" file contains all the lower layer
+// mounts separated by ":" and ordered from uppermost to lowermost
+// layers. The overlay itself is mounted in the "merged" directory,
+// and the "work" dir is needed for overlay to work.
+
+// The "link" file for each layer contains a unique string for the layer.
+// Under the "l" directory at the root there will be a symbolic link
+// with that unique string pointing to the "diff" directory for the layer.
+// The symbolic links are used to reference lower layers in the "lower"
+// file and on mount. The links are used to shorten the total length
+// of a layer reference without requiring changes to the layer identifier
+// or root directory. Mounts are always done relative to root and
+// referencing the symbolic links in order to ensure the number of
+// lower directories can fit in a single page for making the mount
+// syscall. A hard upper limit of 128 lower layers is enforced to ensure
+// that mounts do not fail due to length.
+
+const (
+	linkDir   = "l"
+	lowerFile = "lower"
+	maxDepth  = 128
+
+	// idLength represents the number of random characters
+	// which can be used to create the unique link identifier
+	// for every layer. If this value is too long then the
+	// page size limit for the mount command may be exceeded.
+	// The idLength should be selected such that following equation
+	// is true (512 is a buffer for label metadata).
+	// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+	idLength = 26
+)
+
+type overlayOptions struct {
+	imageStores       []string
+	layerStores       []additionalLayerStore
+	quota             quota.Quota
+	mountProgram      string
+	skipMountHome     bool
+	mountOptions      string
+	ignoreChownErrors bool
+	forceMask         *os.FileMode
+}
+
+// Driver contains information about the home directory and the list of active mounts that are created using this driver.
+type Driver struct {
+	name             string
+	home             string
+	runhome          string
+	uidMaps          []idtools.IDMap
+	gidMaps          []idtools.IDMap
+	ctr              *graphdriver.RefCounter
+	quotaCtl         *quota.Control
+	options          overlayOptions
+	naiveDiff        graphdriver.DiffDriver
+	supportsDType    bool
+	supportsVolatile *bool
+	usingMetacopy    bool
+	locker           *locker.Locker
+}
+
+type additionalLayerStore struct {
+
+	// path is the directory where this store is available on the host.
+ path string + + // withReference is true when the store contains image reference information (base64-encoded) + // in its layer search path so the path to the diff will be + // /base64(reference)// + withReference bool +} + +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) + +func init() { + graphdriver.Register("overlay", Init) + graphdriver.Register("overlay2", Init) +} + +func hasMetacopyOption(opts []string) bool { + for _, s := range opts { + if s == "metacopy=on" { + return true + } + } + return false +} + +func stripOption(opts []string, option string) []string { + for i, s := range opts { + if s == option { + return stripOption(append(opts[:i], opts[i+1:]...), option) + } + } + return opts +} + +func hasVolatileOption(opts []string) bool { + for _, s := range opts { + if s == "volatile" { + return true + } + } + return false +} + +func getMountProgramFlagFile(path string) string { + return filepath.Join(path, ".has-mount-program") +} + +func checkSupportVolatile(home, runhome string) (bool, error) { + feature := fmt.Sprintf("volatile") + volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature) + var usingVolatile bool + if err == nil { + if volatileCacheResult { + logrus.Debugf("Cached value indicated that volatile is being used") + } else { + logrus.Debugf("Cached value indicated that volatile is not being used") + } + usingVolatile = volatileCacheResult + } else { + usingVolatile, err = doesVolatile(home) + if err == nil { + if usingVolatile { + logrus.Debugf("overlay: test mount indicated that volatile is being used") + } else { + logrus.Debugf("overlay: test mount indicated that volatile is not being used") + } + if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil { + return false, errors.Wrap(err, "recording volatile-being-used status") + } + } + } + return usingVolatile, nil +} + +func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { + var supportsDType bool + + if os.Geteuid() != 0 { + return false, nil + } + + feature := "overlay" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that overlay is supported") + } else { + logrus.Debugf("Cached value indicated that overlay is not supported") + } + supportsDType = overlayCacheResult + if !supportsDType { + return false, errors.New(overlayCacheText) + } + } else { + supportsDType, err = supportsOverlay(home, fsMagic, 0, 0) + if err != nil { + os.Remove(filepath.Join(home, linkDir)) + os.Remove(home) + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return false, err + } + err = errors.Wrap(err, "kernel does not support overlay fs") + if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil { + return false, errors.Wrapf(err2, "recording overlay not being supported (%v)", err) + } + return false, err + } + if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil { + return false, errors.Wrap(err, "recording overlay support status") + } + } + return supportsDType, nil +} + +func (d *Driver) getSupportsVolatile() (bool, error) { + if d.supportsVolatile != nil { + return *d.supportsVolatile, nil + } + supportsVolatile, err := checkSupportVolatile(d.home, d.runhome) + if err != nil { + return false, err + } + d.supportsVolatile = &supportsVolatile + return supportsVolatile, nil +} + +// 
isNetworkFileSystem checks if the specified file system is supported by native overlay
+// as backing store when running in a user namespace.
+func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
+	switch fsMagic {
+	// a bunch of network file systems...
+	case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
+		graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
+		graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
+		graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
+		graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
+		graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
+		graphdriver.FsMagicVBOXSF, graphdriver.FsMagicVXFS:
+		return true
+	}
+	return false
+}
+
+// Init returns a native diff driver for the overlay filesystem.
+// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
+// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+	opts, err := parseOptions(options.DriverOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
+	}
+
+	if opts.mountProgram != "" {
+		if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
+			m := os.FileMode(0700)
+			opts.forceMask = &m
+			logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
+		}
+
+		if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
+			return nil, err
+		}
+	} else {
+		if opts.forceMask != nil {
+			return nil, errors.New("'force_mask' is supported only with 'mount_program'")
+		}
+		// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
+		switch fsMagic {
+		case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+			return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
+		}
+		if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
+			return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "A network file system with user namespaces is not supported. 
Please use a mount_program") + } + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + return nil, err + } + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + var usingMetacopy bool + var supportsDType bool + var supportsVolatile *bool + if opts.mountProgram != "" { + supportsDType = true + t := true + supportsVolatile = &t + } else { + supportsDType, err = checkAndRecordOverlaySupport(fsMagic, home, runhome) + if err != nil { + return nil, err + } + feature := fmt.Sprintf("metacopy(%s)", opts.mountOptions) + metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if metacopyCacheResult { + logrus.Debugf("Cached value indicated that metacopy is being used") + } else { + logrus.Debugf("Cached value indicated that metacopy is not being used") + } + usingMetacopy = metacopyCacheResult + } else { + usingMetacopy, err = doesMetacopy(home, opts.mountOptions) + if err == nil { + if usingMetacopy { + logrus.Debugf("overlay: test mount indicated that metacopy is being used") + } else { + logrus.Debugf("overlay: test mount indicated that metacopy is not being used") + } + if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil { + return nil, errors.Wrap(err, "recording metacopy-being-used status") + } + } else { + logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err) + return nil, err + } + } + } + + if !opts.skipMountHome { + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + } + + fileSystemType := graphdriver.FsMagicOverlay + if opts.mountProgram != "" { + fileSystemType = graphdriver.FsMagicFUSE + } + + d := &Driver{ + name: "overlay", + home: home, + runhome: runhome, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), + supportsDType: supportsDType, + usingMetacopy: usingMetacopy, + supportsVolatile: supportsVolatile, + locker: locker.New(), + options: *opts, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err) + } + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. + return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. 
Found %v", backingFs) + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + trimkey := strings.ToLower(key) + trimkey = strings.TrimPrefix(trimkey, "overlay.") + trimkey = strings.TrimPrefix(trimkey, "overlay2.") + trimkey = strings.TrimPrefix(trimkey, ".") + switch trimkey { + case "override_kernel_check": + logrus.Debugf("overlay: override_kernel_check option was specified, but is no longer necessary") + case "mountopt": + o.mountOptions = val + case "size": + logrus.Debugf("overlay: size=%s", val) + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) + case "inodes": + logrus.Debugf("overlay: inodes=%s", val) + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + o.quota.Inodes = uint64(inodes) + case "imagestore", "additionalimagestore": + logrus.Debugf("overlay: imagestore=%s", val) + // Additional read only image stores to use for lower paths + if val == "" { + continue + } + for _, store := range strings.Split(val, ",") { + store = filepath.Clean(store) + if !filepath.IsAbs(store) { + return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store) + } + st, err := os.Stat(store) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: image path %q must be a directory", store) + } + o.imageStores = append(o.imageStores, store) + } + case "additionallayerstore": + logrus.Debugf("overlay: additionallayerstore=%s", val) + // Additional read only layer stores to use for lower paths + if val == "" { + continue + } + for _, lstore := range strings.Split(val, ",") { + elems := strings.Split(lstore, ":") + lstore = filepath.Clean(elems[0]) + if !filepath.IsAbs(lstore) { + return nil, fmt.Errorf("overlay: additionallayerstore path %q is not absolute. 
Can not be relative", lstore) + } + st, err := os.Stat(lstore) + if err != nil { + return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir") + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore) + } + var withReference bool + for _, e := range elems[1:] { + switch e { + case "ref": + if withReference { + return nil, fmt.Errorf("overlay: additionallayerstore config of %q contains %q option twice", lstore, e) + } + withReference = true + default: + return nil, fmt.Errorf("overlay: additionallayerstore config %q contains unknown option %q", lstore, e) + } + } + o.layerStores = append(o.layerStores, additionalLayerStore{ + path: lstore, + withReference: withReference, + }) + } + case "mount_program": + logrus.Debugf("overlay: mount_program=%s", val) + if val != "" { + _, err := os.Stat(val) + if err != nil { + return nil, errors.Wrapf(err, "overlay: can't stat program %q", val) + } + } + o.mountProgram = val + case "skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) + case "ignore_chown_errors": + logrus.Debugf("overlay: ignore_chown_errors=%s", val) + o.ignoreChownErrors, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "force_mask": + logrus.Debugf("overlay: force_mask=%s", val) + var mask int64 + switch val { + case "shared": + mask = 0755 + case "private": + mask = 0700 + default: + mask, err = strconv.ParseInt(val, 8, 32) + if err != nil { + return nil, err + } + } + m := os.FileMode(mask) + o.forceMask = &m + default: + return nil, fmt.Errorf("overlay: Unknown option %s", key) + } + } + return o, nil +} + +func cachedFeatureSet(feature string, set bool) string { + if set { + return fmt.Sprintf("%s-true", feature) + } + return fmt.Sprintf("%s-false", feature) +} + +func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) { + content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) + if err == nil { + return true, string(content), nil + } + content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) + if err == nil { + return false, string(content), nil + } + return false, "", err +} + +func cachedFeatureRecord(runhome, feature string, supported bool, text string) (err error) { + f, err := os.Create(filepath.Join(runhome, cachedFeatureSet(feature, supported))) + if f != nil { + if text != "" { + fmt.Fprintf(f, "%s", text) + } + f.Close() + } + return err +} + +func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { + if os.Geteuid() != 0 || graphroot == "" || rundir == "" { + return false, nil + } + + home := filepath.Join(graphroot, "overlay") + runhome := filepath.Join(rundir, "overlay") + + var contents string + flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home)) + if err == nil { + contents = strings.TrimSpace(string(flagContent)) + } + switch contents { + case "true": + logrus.Debugf("overlay: storage already configured with a mount-program") + return false, nil + default: + needsMountProgram, err := scanForMountProgramIndicators(home) + if err != nil && !os.IsNotExist(err) { + return false, err + } + if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) { + return false, err + } + if needsMountProgram { + return false, nil + } + // fall through to check if we find ourselves needing to use a + // mount 
program now + case "false": + } + + for _, dir := range []string{home, runhome} { + if _, err := os.Stat(dir); err != nil { + _ = idtools.MkdirAllAs(dir, 0700, 0, 0) + } + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return false, err + } + + supportsDType, _ := checkAndRecordOverlaySupport(fsMagic, home, runhome) + return supportsDType, nil +} + +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first + + exec.Command("modprobe", "overlay").Run() + + logLevel := logrus.ErrorLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } + + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return false, err + } + } + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err + } + if !supportsDType { + return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. + mergedDir := filepath.Join(layerDir, "merged") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID) + flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir) + if selinux.GetEnabled() && + selinux.SecurityCheckContext(selinuxLabelTest) == nil { + // Linux 5.11 introduced unprivileged overlay mounts but it has an issue + // when used together with selinux labels. + // Check that overlay supports selinux labels as well. 
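+		// Editor's note (illustrative, not upstream): label.FormatMountLabel
+		// appends the SELinux context to the mount data, producing roughly
+		// lowerdir=...,upperdir=...,workdir=...,context="system_u:object_r:container_file_t:s0"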
+ flags = label.FormatMountLabel(flags, selinuxLabelTest) + } + if unshare.IsRootless() { + flags = fmt.Sprintf("%s,userxattr", flags) + } + if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil { + logrus.Debugf("Unable to create kernel-style whiteout: %v", err) + return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout") + } + + if len(flags) < unix.Getpagesize() { + err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.Debugf("overlay: test mount with multiple lowers succeeded") + return supportsDType, nil + } + logrus.Debugf("overlay: test mount with multiple lowers failed %v", err) + } + flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) + if selinux.GetEnabled() { + flags = label.FormatMountLabel(flags, selinuxLabelTest) + } + if len(flags) < unix.Getpagesize() { + err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } + logrus.Debugf("overlay: test mount with a single lower failed %v", err) + } + logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) + } + + logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") +} + +func (d *Driver) useNaiveDiff() bool { + useNaiveDiffLock.Do(func() { + if d.options.mountProgram != "" { + useNaiveDiffOnly = true + return + } + feature := fmt.Sprintf("native-diff(%s)", d.options.mountOptions) + nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature) + if err == nil { + if nativeDiffCacheResult { + logrus.Debugf("Cached value indicated that native-diff is usable") + } else { + logrus.Debugf("Cached value indicated that native-diff is not being used") + logrus.Info(nativeDiffCacheText) + } + useNaiveDiffOnly = !nativeDiffCacheResult + return + } + if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil { + nativeDiffCacheText = fmt.Sprintf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) + logrus.Info(nativeDiffCacheText) + useNaiveDiffOnly = true + } + cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText) + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return d.name +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. 
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+		{"Supports d_type", strconv.FormatBool(d.supportsDType)},
+		{"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())},
+		{"Using metacopy", strconv.FormatBool(d.usingMetacopy)},
+	}
+}
+
+// Metadata returns meta data about the overlay driver such as
+// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := map[string]string{
+		"WorkDir":   path.Join(dir, "work"),
+		"MergedDir": path.Join(dir, "merged"),
+		"UpperDir":  path.Join(dir, "diff"),
+	}
+
+	lowerDirs, err := d.getLowerDirs(id)
+	if err != nil {
+		return nil, err
+	}
+	if len(lowerDirs) > 0 {
+		metadata["LowerDir"] = strings.Join(lowerDirs, ":")
+	}
+
+	return metadata, nil
+}
+
+// Cleanup cleans up any state created by overlay which should be removed
+// when the daemon is being shut down. For now, we just have to unmount the
+// bind mount we created.
+func (d *Driver) Cleanup() error {
+	_ = os.RemoveAll(d.getStagingDir())
+	return mount.Unmount(d.home)
+}
+
+// LookupAdditionalLayer looks up additional layer store by the specified
+// digest and ref and returns an object representing that layer.
+// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove the comment once it's no longer experimental.
+func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
+	l, err := d.getAdditionalLayerPath(dgst, ref)
+	if err != nil {
+		return nil, err
+	}
+	// Tell the additional layer store that we use this layer.
+	// This will increase reference counter on the store's side.
+	// This will be decreased on Release() method.
+	notifyUseAdditionalLayer(l)
+	return &additionalLayer{
+		path: l,
+		d:    d,
+	}, nil
+}
+
+// LookupAdditionalLayerByID looks up additional layer store by the specified
+// ID and returns an object representing that layer.
+// This API is experimental and can be changed without bumping the major version number.
+// TODO: remove the comment once it's no longer experimental.
+func (d *Driver) LookupAdditionalLayerByID(id string) (graphdriver.AdditionalLayer, error) {
+	l, err := d.getAdditionalLayerPathByID(id)
+	if err != nil {
+		return nil, err
+	}
+	// Tell the additional layer store that we use this layer.
+	// This will increase reference counter on the store's side.
+	// This will be decreased on Release() method.
+	notifyUseAdditionalLayer(l)
+	return &additionalLayer{
+		path: l,
+		d:    d,
+	}, nil
+}
+
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	if readWrite {
+		return d.CreateReadWrite(id, template, opts)
+	}
+	return d.Create(id, template, opts)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
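+//
+// Editor's sketch (illustrative, not upstream; IDs are hypothetical and the
+// quota option requires an xfs backing store mounted with pquota):
+//
+//	opts := &graphdriver.CreateOpts{StorageOpt: map[string]string{"size": "10G"}}
+//	err := d.CreateReadWrite("new-container-layer", "image-top-layer", opts)
+//	_ = err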
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + if _, ok := opts.StorageOpt["inodes"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10) + } + + return d.create(id, parent, opts, false) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } + + if _, ok := opts.StorageOpt["inodes"]; ok { + return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers") + } + } + + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { + dir := d.dir(id) + + uidMaps := d.uidMaps + gidMaps := d.gidMaps + + if opts != nil && opts.IDMappings != nil { + uidMaps = opts.IDMappings.UIDs() + gidMaps = opts.IDMappings.GIDs() + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, + } + + // Make the link directory if it does not exist + if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil { + return err + } + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil { + return err + } + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootUID = int(st.UID()) + rootGID = int(st.GID()) + } + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if d.quotaCtl != nil && !disableQuota { + quota := quota.Quota{} + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + if driver.options.quota.Size > 0 { + quota.Size = driver.options.quota.Size + } + if driver.options.quota.Inodes > 0 { + quota.Inodes = driver.options.quota.Inodes + } + } + // Set container disk quota limit + // If it is set to 0, we will track the disk usage, but not enforce a limit + if err := d.quotaCtl.SetQuota(dir, quota); err != nil { + return err + } + } + + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + if parent != "" { + st, err := system.Stat(filepath.Join(d.dir(parent), "diff")) + if err != nil { + return err + } + perms = os.FileMode(st.Mode()) + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := 
os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
+		return err
+	}
+
+	// Write link id to link file
+	if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+		return err
+	}
+
+	if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+
+	// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
+	if parent == "" {
+		return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID)
+	}
+
+	lower, err := d.getLower(parent)
+	if err != nil {
+		return err
+	}
+	if lower != "" {
+		if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Parse overlay storage options
+func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
+	// Read size to set the disk project quota per container
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return err
+			}
+			driver.options.quota.Size = uint64(size)
+		case "inodes":
+			inodes, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return err
+			}
+			driver.options.quota.Inodes = uint64(inodes)
+		default:
+			return fmt.Errorf("Unknown option %s", key)
+		}
+	}
+
+	return nil
+}
+
+func (d *Driver) getLower(parent string) (string, error) {
+	parentDir := d.dir(parent)
+
+	// Ensure parent exists
+	if _, err := os.Lstat(parentDir); err != nil {
+		return "", err
+	}
+
+	// Read Parent link file
+	parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return "", err
+		}
+		logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link"))
+		if err := d.recreateSymlinks(); err != nil {
+			return "", errors.Wrap(err, "recreating the links")
+		}
+		parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
+		if err != nil {
+			return "", err
+		}
+	}
+	lowers := []string{path.Join(linkDir, string(parentLink))}
+
+	parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
+	if err == nil {
+		parentLowers := strings.Split(string(parentLower), ":")
+		lowers = append(lowers, parentLowers...)
+	}
+	return strings.Join(lowers, ":"), nil
+}
+
+func (d *Driver) dir(id string) string {
+	p, _ := d.dir2(id)
+	return p
+}
+
+func (d *Driver) dir2(id string) (string, bool) {
+	newpath := path.Join(d.home, id)
+	if _, err := os.Stat(newpath); err != nil {
+		for _, p := range d.AdditionalImageStores() {
+			l := path.Join(p, d.name, id)
+			_, err = os.Stat(l)
+			if err == nil {
+				return l, true
+			}
+		}
+	}
+	return newpath, false
+}
+
+func (d *Driver) getLowerDirs(id string) ([]string, error) {
+	var lowersArray []string
+	lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
+	if err == nil {
+		for _, s := range strings.Split(string(lowers), ":") {
+			lower := d.dir(s)
+			lp, err := os.Readlink(lower)
+			// if the link does not exist, we lost the symlinks during a sudden reboot.
+			// Let's go ahead and recreate those symlinks.
+			if err != nil {
+				if os.IsNotExist(err) {
+					logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best to wipe the storage to avoid further errors due to storage corruption.", lower)
+					if err := d.recreateSymlinks(); err != nil {
+						return nil, fmt.Errorf("recreating the missing symlinks: %v", err)
+					}
+					// let's call Readlink on lower again now that we have recreated the missing symlinks
+					lp, err = os.Readlink(lower)
+					if err != nil {
+						return nil, err
+					}
+				} else {
+					return nil, err
+				}
+			}
+			lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp))))
+		}
+	} else if !os.IsNotExist(err) {
+		return nil, err
+	}
+	return lowersArray, nil
+}
+
+func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string {
+	if uidMaps == nil {
+		uidMaps = d.uidMaps
+	}
+	if gidMaps == nil {
+		gidMaps = d.gidMaps
+	}
+	if uidMaps != nil {
+		var uids, gids bytes.Buffer
+		if len(uidMaps) == 1 && uidMaps[0].Size == 1 {
+			uids.WriteString(fmt.Sprintf("squash_to_uid=%d", uidMaps[0].HostID))
+		} else {
+			uids.WriteString("uidmapping=")
+			for _, i := range uidMaps {
+				if uids.Len() > 0 {
+					uids.WriteString(":")
+				}
+				uids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size))
+			}
+		}
+		if len(gidMaps) == 1 && gidMaps[0].Size == 1 {
+			gids.WriteString(fmt.Sprintf("squash_to_gid=%d", gidMaps[0].HostID))
+		} else {
+			gids.WriteString("gidmapping=")
+			for _, i := range gidMaps {
+				if gids.Len() > 0 {
+					gids.WriteString(":")
+				}
+				gids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size))
+			}
+		}
+		return fmt.Sprintf("%s,%s,%s", opts, uids.String(), gids.String())
+	}
+	return opts
+}
+
+// Remove cleans the directories that are created for this id.
+func (d *Driver) Remove(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+
+	dir := d.dir(id)
+	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
+	if err == nil {
+		if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
+			logrus.Debugf("Failed to remove link: %v", err)
+		}
+	}
+
+	d.releaseAdditionalLayerByID(id)
+
+	if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// recreateSymlinks goes through the driver's home directory and checks if the diff directory
+// under each layer has a symlink created for it under the linkDir. If the symlink does not
+// exist, it is created.
+func (d *Driver) recreateSymlinks() error {
+	// List all the directories under the home directory
+	dirs, err := ioutil.ReadDir(d.home)
+	if err != nil {
+		return fmt.Errorf("reading driver home directory %q: %v", d.home, err)
+	}
+	linksDir := filepath.Join(d.home, "l")
+	// This makes the link directory if it doesn't exist
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+	// Keep looping as long as we take some corrective action in each iteration
+	var errs *multierror.Error
+	madeProgress := true
+	for madeProgress {
+		errs = nil
+		madeProgress = false
+		// Check that for each layer, there's a link in "l" with the name in
+		// the layer's "link" file that points to the layer's "diff" directory.
+ for _, dir := range dirs { + // Skip over the linkDir and anything that is not a directory + if dir.Name() == linkDir || !dir.Mode().IsDir() { + continue + } + // Read the "link" file under each layer to get the name of the symlink + data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) + if err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name())) + continue + } + linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) + // Check if the symlink exists, and if it doesn't, create it again with the + // name we got from the "link" file + _, err = os.Lstat(linkPath) + if err != nil && os.IsNotExist(err) { + if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { + errs = multierror.Append(errs, err) + continue + } + madeProgress = true + } else if err != nil { + errs = multierror.Append(errs, err) + continue + } + } + // Now check if we somehow lost a "link" file, by making sure + // that each symlink we have corresponds to one. + links, err := ioutil.ReadDir(linksDir) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + // Go through all of the symlinks in the "l" directory + for _, link := range links { + // Read the symlink's target, which should be "../$layer/diff" + target, err := os.Readlink(filepath.Join(linksDir, link.Name())) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + targetComponents := strings.Split(target, string(os.PathSeparator)) + if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { + errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) + // force the link to be recreated on the next pass + os.Remove(filepath.Join(linksDir, link.Name())) + madeProgress = true + continue + } + // Reconstruct the name of the target's link file and check that + // it has the basename of our symlink in it. + targetID := targetComponents[1] + linkFile := filepath.Join(d.dir(targetID), "link") + data, err := ioutil.ReadFile(linkFile) + if err != nil || string(data) != link.Name() { + if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID)) + continue + } + madeProgress = true + } + } + } + if errs != nil { + return errs.ErrorOrNil() + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + return d.get(id, false, options) +} + +func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir, inAdditionalStore := d.dir2(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + readWrite := !inAdditionalStore + + if !d.SupportsShifting() || options.DisableShifting { + disableShifting = true + } + + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } + optsList := options.Options + if len(optsList) == 0 { + optsList = strings.Split(d.options.mountOptions, ",") + } else { + // If metacopy=on is present in d.options.mountOptions it must be present in the mount + // options otherwise the kernel refuses to follow the metacopy xattr. 
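	// (Background, beyond the vendored comments: with metacopy=on the kernel
	// copies up only inode metadata on chmod/chown-style changes and defers
	// copying file data until the content is actually written, so every mount
	// of the layer stack has to agree on the option once a metacopy xattr
	// exists in an upper layer.)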
+ if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { + if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") + optsList = append(optsList, "metacopy=on") + } + } + } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel") + } + optsList = stripOption(optsList, "metacopy=on") + } + + for _, o := range optsList { + if o == "ro" { + readWrite = false + break + } + } + + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil && !os.IsNotExist(err) { + return "", err + } + splitLowers := strings.Split(string(lowers), ":") + if len(splitLowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + + // absLowers is the list of lowers as absolute paths, which works well with additional stores. + absLowers := []string{} + // relLowers is the list of lowers as paths relative to the driver's home directory. + relLowers := []string{} + + // Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers + // lists that we're building. "diff" itself is the upper, so it won't be in the lists. + link, err := ioutil.ReadFile(path.Join(dir, "link")) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link")) + if err := d.recreateSymlinks(); err != nil { + return "", errors.Wrap(err, "recreating the links") + } + link, err = ioutil.ReadFile(path.Join(dir, "link")) + if err != nil { + return "", err + } + } + diffN := 1 + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + permsKnown := false + st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + if err == nil { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + for err == nil { + absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) + diffN++ + st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + if err == nil && !permsKnown { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + } + + // For each lower, resolve its path, and append it and any additional diffN + // directories to the lowers list. + for _, l := range splitLowers { + if l == "" { + continue + } + lower := "" + newpath := path.Join(d.home, l) + if st, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + lower = path.Join(p, d.name, l) + if st2, err2 := os.Stat(lower); err2 == nil { + if !permsKnown { + perms = os.FileMode(st2.Mode()) + permsKnown = true + } + break + } + lower = "" + } + // if it is a "not found" error, that means the symlinks were lost in a sudden reboot + // so call the recreateSymlinks function to go through all the layer dirs and recreate + // the symlinks with the name from their respective "link" files + if lower == "" && os.IsNotExist(err) { + logrus.Warnf("Can't stat lower layer %q because it does not exist. 
Going through storage to recreate the missing symlinks.", newpath) + if err := d.recreateSymlinks(); err != nil { + return "", fmt.Errorf("Recreating the missing symlinks: %v", err) + } + lower = newpath + } else if lower == "" { + return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) + } + } else { + if !permsKnown { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + lower = newpath + } + absLowers = append(absLowers, lower) + relLowers = append(relLowers, l) + diffN = 1 + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + } + } + + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + relLowers = append(relLowers, path.Join(id, "empty")) + } + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + diffDir := path.Join(dir, "diff") + if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { + return "", err + } + + mergedDir := path.Join(dir, "merged") + // Create the driver merged dir + if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return "", err + } + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr) + } + } + } + }() + + workdir := path.Join(dir, "work") + + var opts string + if readWrite { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts) + } + + if d.options.mountProgram == "" && unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } + + // If "volatile" is not supported by the file system, just ignore the request + if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) { + supported, err := d.getSupportsVolatile() + if err != nil { + return "", err + } + if supported { + opts = fmt.Sprintf("%s,volatile", opts) + } + } + + mountData := label.FormatMountLabel(opts, options.MountLabel) + mountFunc := unix.Mount + mountTarget := mergedDir + + pageSize := unix.Getpagesize() + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + if d.options.mountProgram != "" { + mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { + if !disableShifting { + label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps) + } + + // if forceMask is in place, tell fuse-overlayfs to write the permissions mask to an unprivileged xattr as well. 
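	// (Background, beyond the vendored comments: forceMask stores the real
	// file mode in an xattr while giving every node the same on-disk
	// permissions; the xattr_permissions=2 flag added below asks
	// fuse-overlayfs to keep that record in an unprivileged user xattr so a
	// rootless process can write it. The exact xattr name is a fuse-overlayfs
	// implementation detail.)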
+			if d.options.forceMask != nil {
+				label = label + ",xattr_permissions=2"
+			}
+
+			mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
+			mountProgram.Dir = d.home
+			var b bytes.Buffer
+			mountProgram.Stderr = &b
+			err := mountProgram.Run()
+			if err != nil {
+				output := b.String()
+				if output == "" {
+					output = "<stderr empty>"
+				}
+				return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+			}
+			return nil
+		}
+	} else if len(mountData) > pageSize {
+		workdir = path.Join(id, "work")
+		//FIXME: We need to figure out how to get this to work with additional stores
+		if readWrite {
+			diffDir := path.Join(id, "diff")
+			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
+		} else {
+			opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+		}
+		mountData = label.FormatMountLabel(opts, options.MountLabel)
+		if len(mountData) > pageSize {
+			return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize)
+		}
+		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
+			return mountFrom(d.home, source, target, mType, flags, label)
+		}
+		mountTarget = path.Join(id, "merged")
+	}
+
+	// overlay has a check in place to prevent mounting the same file system twice
+	// if volatile was already specified.
+	err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile"))
+	if err != nil && !os.IsNotExist(err) {
+		return "", err
+	}
+
+	flags, data := mount.ParseOptions(mountData)
+	logrus.Debugf("overlay: mount_data=%s", mountData)
+	if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
+		return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err)
+	}
+
+	return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return err
+	}
+	mountpoint := path.Join(dir, "merged")
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
+		return nil
+	}
+	if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	unmounted := false
+
+	if d.options.mountProgram != "" {
+		// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
+ // If they fail, fallback to unix.Unmount + for _, v := range []string{"fusermount3", "fusermount"} { + err := exec.Command(v, "-u", mountpoint).Run() + if err != nil && errors.Cause(err) != exec.ErrNotFound { + logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) + } + if err == nil { + unmounted = true + break + } + } + // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all + // pending changes are propagated to the file system + if !unmounted { + fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0) + if err == nil { + if err := unix.Syncfs(fd); err != nil { + logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) + } + unix.Close(fd) + } + } + } + + if !unmounted { + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + } + + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) + } + + return nil +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { + whiteoutFormat := archive.OverlayWhiteoutFormat + if d.options.mountProgram != "" { + // If we are using a mount program, we are most likely running + // as an unprivileged user that cannot use mknod, so fallback to the + // AUFS whiteout format. + whiteoutFormat = archive.AUFSWhiteoutFormat + } + return whiteoutFormat +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +func (d *Driver) getStagingDir() string { + return filepath.Join(d.home, "staging") +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := d.getDiffPath(id) + if err != nil { + return nil, err + } + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +// CleanupStagingDirectory cleanups the staging directory. 
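// Sketch of the two whiteout encodings selected by getWhiteoutFormat above:
// native overlay marks a deleted file with a 0:0 character device, while the
// AUFS fallback (used under a FUSE mount program, where mknod is usually
// unavailable to unprivileged users) writes a ".wh."-prefixed marker file.
// Illustrative helper, not part of the vendored driver.
// (imports assumed: os, path/filepath, golang.org/x/sys/unix)
func writeWhiteout(dir, name string, native bool) error {
	if native {
		// kernel-style whiteout: character device with device number 0:0
		return unix.Mknod(filepath.Join(dir, name), unix.S_IFCHR|0600, int(unix.Mkdev(0, 0)))
	}
	// AUFS-style whiteout: an empty marker file readers treat as a deletion
	f, err := os.Create(filepath.Join(dir, ".wh."+name))
	if err == nil {
		err = f.Close()
	}
	return err
}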
+func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { + return os.RemoveAll(stagingDirectory) +} + +// ApplyDiff applies the changes in the new layer using the specified function +func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { + var idMappings *idtools.IDMappings + if options != nil { + idMappings = options.Mappings + } + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + applyDir := "" + + if id == "" { + err := os.MkdirAll(d.getStagingDir(), 0700) + if err != nil && !os.IsExist(err) { + return graphdriver.DriverWithDifferOutput{}, err + } + applyDir, err = ioutil.TempDir(d.getStagingDir(), "") + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + } else { + var err error + applyDir, err = d.getDiffPath(id) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + } + + logrus.Debugf("Applying differ in %s", applyDir) + + out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + IgnoreChownErrors: d.options.ignoreChownErrors, + WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: userns.RunningInUserNS(), + }) + out.Target = applyDir + return out, err +} + +// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. +func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error { + if filepath.Dir(stagingDirectory) != d.getStagingDir() { + return fmt.Errorf("%q is not a staging directory", stagingDirectory) + } + + diff, err := d.getDiffPath(id) + if err != nil { + return err + } + if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) { + return err + } + return os.Rename(stagingDirectory, diff) +} + +// DifferTarget gets the location where files are stored for the layer. 
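// Sketch of the staging lifecycle implemented by the methods above: a differ
// unpacks into a temporary directory under <home>/staging, and committing is
// a RemoveAll plus a single rename into the layer's diff location. The helper
// below is illustrative only.
// (imports assumed: io/ioutil, os, path/filepath)
func stageThenCommit(home, diffDir string, unpack func(stagingDir string) error) error {
	parent := filepath.Join(home, "staging")
	if err := os.MkdirAll(parent, 0700); err != nil {
		return err
	}
	staging, err := ioutil.TempDir(parent, "")
	if err != nil {
		return err
	}
	if err := unpack(staging); err != nil {
		_ = os.RemoveAll(staging) // CleanupStagingDirectory equivalent
		return err
	}
	// ApplyDiffFromStagingDirectory equivalent: swap the staged tree in.
	if err := os.RemoveAll(diffDir); err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.Rename(staging, diffDir)
}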
+func (d *Driver) DifferTarget(id string) (string, error) { + return d.getDiffPath(id) +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + + if !d.isParent(id, parent) { + if d.options.ignoreChownErrors { + options.IgnoreChownErrors = d.options.ignoreChownErrors + } + if d.options.forceMask != nil { + options.ForceMask = d.options.forceMask + } + return d.naiveDiff.ApplyDiff(id, parent, options) + } + + idMappings := options.Mappings + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + applyDir, err := d.getDiffPath(id) + if err != nil { + return 0, err + } + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(options.Diff, applyDir, &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + IgnoreChownErrors: d.options.ignoreChownErrors, + ForceMask: d.options.forceMask, + WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: userns.RunningInUserNS(), + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) (string, error) { + dir := d.dir(id) + return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) +} + +func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + for i, l := range layers { + layers[i], err = redirectDiffIfAdditionalLayer(l) + if err != nil { + return nil, err + } + } + return layers, nil +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if d.options.mountProgram == "" && (d.useNaiveDiff() || !d.isParent(id, parent)) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + + p, err := d.getDiffPath(id) + if err != nil { + return 0, err + } + return directory.Size(p) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + lowerDirs, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + + diffPath, err := d.getDiffPath(id) + if err != nil { + return nil, err + } + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + WhiteoutFormat: d.getWhiteoutFormat(), + WhiteoutData: lowerDirs, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. 
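// (Background on the dispatch above: the native Diff/DiffSize paths simply
// tar up or measure the layer's own "diff" directory, which is only valid
// when the requested parent really is the layer's direct parent and no mount
// program or unsupported option is involved; every other case falls back to
// naiveDiff, which mounts the layers and compares them the slow way.)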
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath, err := d.getDiffPath(id) + if err != nil { + return nil, err + } + layers, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return d.options.imageStores +} + +// UpdateLayerIDMap updates ID mappings in a from matching the ones specified +// by toContainer to those specified by toHost. +func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + var err error + dir := d.dir(id) + diffDir := filepath.Join(dir, "diff") + + rootUID, rootGID := 0, 0 + if toHost != nil { + rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs()) + if err != nil { + return err + } + } + + // Mount the new layer and handle ownership changes and possible copy_ups in it. + options := graphdriver.MountOpts{ + MountLabel: mountLabel, + Options: strings.Split(d.options.mountOptions, ","), + } + layerFs, err := d.get(id, true, options) + if err != nil { + return err + } + err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost) + if err != nil { + if err2 := d.Put(id); err2 != nil { + logrus.Errorf("%v; unmounting %v: %v", err, id, err2) + } + return err + } + if err = d.Put(id); err != nil { + return err + } + + // Rotate the diff directories. + i := 0 + perms := defaultPerms + st, err := os.Stat(nameWithSuffix(diffDir, i)) + if d.options.forceMask != nil { + perms = *d.options.forceMask + } else { + if err == nil { + perms = os.FileMode(st.Mode()) + } + } + for err == nil { + i++ + _, err = os.Stat(nameWithSuffix(diffDir, i)) + } + + for i > 0 { + err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i)) + if err != nil { + return err + } + i-- + } + + // We need to re-create the work directory as it might keep a reference + // to the old upper layer in the index. + workDir := filepath.Join(dir, "work") + if err := os.RemoveAll(workDir); err == nil { + if err := idtools.MkdirAs(workDir, defaultPerms, rootUID, rootGID); err != nil { + return err + } + } + + // Re-create the directory that we're going to use as the upper layer. + if err := idtools.MkdirAs(diffDir, perms, rootUID, rootGID); err != nil { + return err + } + return nil +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (d *Driver) SupportsShifting() bool { + if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { + return true + } + return d.options.mountProgram != "" +} + +// dumbJoin is more or less a dumber version of filepath.Join, but one which +// won't Clean() the path, allowing us to append ".." as a component and trust +// pathname resolution to do some non-obvious work. 
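// (Worked example of the rotation performed by UpdateLayerIDMap above, using
// nameWithSuffix: the current "diff" becomes "diff1", an existing "diff1"
// becomes "diff2", and so on, so the pre-chown upper layers stay readable as
// extra lowers while a fresh "diff" becomes the writable layer.)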
+func dumbJoin(names ...string) string { + if len(names) == 0 { + return string(os.PathSeparator) + } + return strings.Join(names, string(os.PathSeparator)) +} + +func nameWithSuffix(name string, number int) string { + if number == 0 { + return name + } + return fmt.Sprintf("%s%d", name, number) +} + +func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) { + refElem := base64.StdEncoding.EncodeToString([]byte(ref)) + for _, ls := range d.options.layerStores { + ref := "" + if ls.withReference { + ref = refElem + } + target := path.Join(ls.path, ref, dgst.String()) + // Check if all necessary files exist + for _, p := range []string{ + filepath.Join(target, "diff"), + filepath.Join(target, "info"), + filepath.Join(target, "blob"), + } { + if _, err := os.Stat(p); err != nil { + return "", errors.Wrapf(graphdriver.ErrLayerUnknown, + "failed to stat additional layer %q: %v", p, err) + } + } + return target, nil + } + + return "", errors.Wrapf(graphdriver.ErrLayerUnknown, + "additional layer (%q, %q) not found", dgst, ref) +} + +func (d *Driver) releaseAdditionalLayerByID(id string) { + if al, err := d.getAdditionalLayerPathByID(id); err == nil { + notifyReleaseAdditionalLayer(al) + } else if !os.IsNotExist(err) { + logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err) + } +} + +// additionalLayer represents a layer in Additional Layer Store. +type additionalLayer struct { + path string + d *Driver + releaseOnce sync.Once +} + +// Info returns arbitrary information stored along with this layer (i.e. `info` file). +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) Info() (io.ReadCloser, error) { + return os.Open(filepath.Join(al.path, "info")) +} + +// Blob returns a reader of the raw contents of this layer. +func (al *additionalLayer) Blob() (io.ReadCloser, error) { + return os.Open(filepath.Join(al.path, "blob")) +} + +// CreateAs creates a new layer from this additional layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) CreateAs(id, parent string) error { + // TODO: support opts + if err := al.d.Create(id, parent, nil); err != nil { + return err + } + dir := al.d.dir(id) + diffDir := path.Join(dir, "diff") + if err := os.RemoveAll(diffDir); err != nil { + return err + } + // tell the additional layer store that we use this layer. + // mark this layer as "additional layer" + if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { + return err + } + notifyUseAdditionalLayer(al.path) + return os.Symlink(filepath.Join(al.path, "diff"), diffDir) +} + +func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) { + al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer")) + if err != nil { + return "", err + } + return string(al), nil +} + +// Release tells the additional layer store that we don't use this handler. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) Release() { + // Tell the additional layer store that we don't use this layer handler. 
+ // This will decrease the reference counter on the store's side, which was + // increased in LookupAdditionalLayer (so this must be called only once). + al.releaseOnce.Do(func() { + notifyReleaseAdditionalLayer(al.path) + }) +} + +// notifyUseAdditionalLayer notifies Additional Layer Store that we use the specified layer. +// This is done by creating "use" file in the layer directory. This is useful for +// Additional Layer Store to consider when to perform GC. Notification-aware Additional +// Layer Store must return ENOENT. +func notifyUseAdditionalLayer(al string) { + if !path.IsAbs(al) { + logrus.Warnf("additionallayer must be absolute (got: %v)", al) + return + } + useFile := path.Join(al, "use") + f, err := os.Create(useFile) + if os.IsNotExist(err) { + return + } else if err == nil { + f.Close() + if err := os.Remove(useFile); err != nil { + logrus.Warnf("Failed to remove use file") + } + } + logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err) +} + +// notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified +// layer anymore. This is done by rmdir-ing the layer directory. This is useful for +// Additional Layer Store to consider when to perform GC. Notification-aware Additional +// Layer Store must return ENOENT. +func notifyReleaseAdditionalLayer(al string) { + if !path.IsAbs(al) { + logrus.Warnf("additionallayer must be absolute (got: %v)", al) + return + } + // tell the additional layer store that we don't use this layer anymore. + err := unix.Rmdir(al) + if os.IsNotExist(err) { + return + } + logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err) +} + +// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and +// returns the redirected path. If the passed diff is not the one in Additional Layer +// Store, it returns the original path without changes. +func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { + if ld, err := os.Readlink(diffPath); err == nil { + // diff is the link to Additional Layer Store + if !path.IsAbs(ld) { + return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld) + } + diffPath = ld + } else if err.(*os.PathError).Err != syscall.EINVAL { + return "", err + } + return diffPath, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go new file mode 100644 index 00000000000..0b70a5d92bc --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go @@ -0,0 +1,21 @@ +// +build linux,cgo + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. 
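// (Layout implied by the helpers above: an entry in an Additional Layer
// Store is a directory holding a "diff" subdirectory with the extracted
// layer, an "info" file of store-defined metadata, a "blob" file with the
// raw layer, and a transient "use" marker that notifyUseAdditionalLayer and
// notifyReleaseAdditionalLayer manipulate so the store can garbage-collect
// unreferenced layers.)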
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + usage := &directory.DiskUsage{} + if d.quotaCtl != nil { + err := d.quotaCtl.GetDiskUsage(d.dir(id), usage) + return usage, err + } + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go new file mode 100644 index 00000000000..1cdac777751 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -0,0 +1,16 @@ +// +build linux,!cgo + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go new file mode 100644 index 00000000000..49af84a229f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package overlay + +func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go new file mode 100644 index 00000000000..736c48b9c1a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -0,0 +1,81 @@ +// +build linux + +package overlay + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("Generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. 
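// (Arithmetic note for generateID above: base32 yields 5 bits per output
// character, so l characters require size = (l*5 + 7) / 8 random bytes. For
// the 26-character link ids this driver uses, that is 17 bytes, of which 130
// bits survive the truncation to 26 characters.)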
+func retryOnError(err error) bool {
+	switch err := err.(type) {
+	case *os.PathError:
+		return retryOnError(err.Err) // unpack the target error
+	case syscall.Errno:
+		if err == unix.EPERM {
+			// EPERM represents an entropy pool exhaustion, a condition under
+			// which we backoff and retry.
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
new file mode 100644
index 00000000000..9fc57b36bf9
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package overlayutils
+
+import (
+	"fmt"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/pkg/errors"
+)
+
+// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
+func ErrDTypeNotSupported(driver, backingFs string) error {
+	msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
+	if backingFs == "xfs" {
+		msg += " Reformat the filesystem with ftype=1 to enable d_type support."
+	}
+	msg += " Running without d_type is not supported."
+	return errors.Wrap(graphdriver.ErrNotSupported, msg)
+}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
new file mode 100644
index 00000000000..0609f970c28
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -0,0 +1,404 @@
+// +build linux,!exclude_disk_quota,cgo
+
+//
+// projectquota.go - implements XFS project quota controls
+// for setting quota limits on a newly created directory.
+// It currently supports the legacy XFS specific ioctls.
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32 fsx_xflags;
+	__u32 fsx_extsize;
+	__u32 fsx_nextents;
+	__u32 fsx_projid;
+	unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA 2
+#endif
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/directory"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+const projectIDsAllocatedPerQuotaHome = 10000
+
+// Quota limit params - currently we only control blocks hard limit and inodes
+type Quota struct {
+	Size   uint64
+	Inodes uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+	backingFsBlockDev string
+	nextProjectID     uint32
+	quotas            map[string]uint32
+}
+
+// Attempt to generate a unique projectid. Multiple directories
+// per file system can have quota and they need a group of unique
+// ids.
This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) +// unique projectids, based on the inode of the basepath. +func generateUniqueProjectID(path string) (uint32, error) { + fileinfo, err := os.Stat(path) + if err != nil { + return 0, err + } + stat, ok := fileinfo.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("Not a syscall.Stat_t %s", path) + + } + projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) + return uint32(projectID), nil +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the basePath directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects +// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects +// echo storage:100000 >> /etc/projid +// echo volumes:200000 >> /etc/projid +// xfs_quota -x -c 'project -s storage volumes' / +// +// In the example above, the storage directory project id will be used as a +// "start offset" and all containers will be assigned larger project ids +// (e.g. >= 100000). Then the volumes directory project id will be used as a +// "start offset" and all volumes will be assigned larger project ids +// (e.g. >= 200000). +// This is a way to prevent xfs_quota management from conflicting with +// containers/storage. + +// +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. 
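// Sketch of how the overlay driver consumes this API (an illustrative call
// sequence under the linux,cgo build; the helper name is made up):
// (import assumed: github.com/containers/storage/drivers/quota)
func exampleSetLayerQuota(home, layerDir string) error {
	ctl, err := quota.NewControl(home) // probes whether project quotas work here
	if err != nil {
		return err // backing filesystem has no usable project quota support
	}
	// A Size/Inodes of 0 means "track usage but do not enforce a limit".
	return ctl.SetQuota(layerDir, quota.Quota{Size: 1 << 30, Inodes: 0})
}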
+// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + + } + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + Inodes: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.FS_PROJ_QUOTA + + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) + return nil +} + +// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota +func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { + d, err := 
q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + usage.Size = int64(d.d_bcount) * 512 + usage.InodeCount = int64(d.d_icount) + + return nil +} + +func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { + var d C.fs_disk_quota_t + + projectID, ok := q.quotas[targetPath] + if !ok { + return d, fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + + return d, nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, 
"backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + unix.Unlink(backingFsBlockDev) + if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go new file mode 100644 index 00000000000..7469138db7a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -0,0 +1,33 @@ +// +build !linux exclude_disk_quota !cgo + +package quota + +import ( + "github.com/pkg/errors" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 + Inodes uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { +} + +func NewControl(basePath string) (*Control, error) { + return nil, errors.New("filesystem does not support, or has not enabled quotas") +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + return errors.New("filesystem does not support, or has not enabled quotas") +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + return errors.New("filesystem does not support, or has not enabled quotas") +} diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go new file mode 100644 index 00000000000..7743dcedbd0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/containers/storage/drivers/aufs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go new file mode 100644 index 00000000000..40ff1cdd0df --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/containers/storage/drivers/btrfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go new file mode 100644 index 00000000000..cefe2e8c754 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux,cgo + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/containers/storage/drivers/devmapper" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go new file mode 100644 index 00000000000..30e3b4d7475 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -0,0 
+1,8 @@ +// +build !exclude_graphdriver_overlay,linux,cgo + +package register + +import ( + // register the overlay graphdriver + _ "github.com/containers/storage/drivers/overlay" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go new file mode 100644 index 00000000000..691ce859291 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/containers/storage/drivers/vfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go new file mode 100644 index 00000000000..048b27097d1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/containers/storage/drivers/windows" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go new file mode 100644 index 00000000000..c748468e5cb --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/containers/storage/drivers/zfs" +) diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go new file mode 100644 index 00000000000..d40d71cfc1e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -0,0 +1,52 @@ +package graphdriver + +import ( + "github.com/sirupsen/logrus" + + "github.com/containers/storage/pkg/idtools" +) + +// TemplateDriver is just barely enough of a driver that we can implement a +// naive version of CreateFromTemplate on top of it. +type TemplateDriver interface { + DiffDriver + CreateReadWrite(id, parent string, opts *CreateOpts) error + Create(id, parent string, opts *CreateOpts) error + Remove(id string) error +} + +// CreateFromTemplate creates a layer with the same contents and parent as +// another layer. Internally, it may even depend on that other layer +// continuing to exist, as if it were actually a child of the child layer. 
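+//
+// NaiveCreateFromTemplate realizes that contract using only the primitives
+// in TemplateDriver: it creates the new layer on top of parent, takes a Diff
+// between the template layer and parent, and replays that diff onto the new
+// layer via ApplyDiff, removing the half-built layer again if either step
+// fails.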
+func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error { + var err error + if readWrite { + err = d.CreateReadWrite(id, parent, opts) + } else { + err = d.Create(id, parent, opts) + } + if err != nil { + return err + } + diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel) + if err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("Removing layer %q: %v", id, err2) + } + return err + } + + applyOptions := ApplyDiffOpts{ + Diff: diff, + Mappings: templateIDMappings, + MountLabel: opts.MountLabel, + IgnoreChownErrors: opts.ignoreChownErrors, + } + if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("Removing layer %q: %v", id, err2) + } + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go new file mode 100644 index 00000000000..bf22a5f6fd5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go @@ -0,0 +1,7 @@ +package vfs + +import "github.com/containers/storage/drivers/copy" + +func dirCopy(srcDir, dstDir string) error { + return copy.DirCopy(srcDir, dstDir, copy.Content, true) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go new file mode 100644 index 00000000000..8ac80ee1dba --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package vfs // import "github.com/containers/storage/drivers/vfs" + +import "github.com/containers/storage/pkg/chrootarchive" + +func dirCopy(srcDir, dstDir string) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go new file mode 100644 index 00000000000..1b58e2f63e1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -0,0 +1,299 @@ +package vfs + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" +) + +var ( + // CopyDir defines the copy method to use. + CopyDir = dirCopy +) + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. 
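+//
+// A minimal usage sketch (the home path and option value are hypothetical):
+//
+//	d, err := Init("/var/lib/containers/storage/vfs",
+//		graphdriver.Options{DriverOptions: []string{"vfs.ignore_chown_errors=true"}})
+//	if err != nil {
+//		return nil, err
+//	}
+//	defer d.Cleanup()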
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + d := &Driver{ + name: "vfs", + homes: []string{home}, + idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + } + + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + return nil, err + } + for _, option := range options.DriverOptions { + + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "vfs.imagestore", ".imagestore": + d.homes = append(d.homes, strings.Split(val, ",")...) + continue + case "vfs.mountopt": + return nil, fmt.Errorf("vfs driver does not support mount options") + case ".ignore_chown_errors", "vfs.ignore_chown_errors": + logrus.Debugf("vfs: ignore_chown_errors=%s", val) + var err error + d.ignoreChownErrors, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("vfs driver does not support %s options", key) + } + } + d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) + + return d, nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + name string + homes []string + idMappings *idtools.IDMappings + ignoreChownErrors bool + naiveDiff graphdriver.DiffDriver + updater graphdriver.LayerIDMapUpdater +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := d.dir(id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. 
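+// For vfs the naive diff-and-apply path is unnecessary: create() already
+// copies the parent's directory tree in full, so creating the new layer with
+// the template as its parent yields identical contents.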
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + if d.ignoreChownErrors { + options.IgnoreChownErrors = d.ignoreChownErrors + } + return d.naiveDiff.ApplyDiff(id, parent, options) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, false) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + idMappings := d.idMappings + if opts != nil && opts.IDMappings != nil { + idMappings = opts.IDMappings + } + + dir := d.dir(id) + rootIDs := idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + return err + } + + defer func() { + if retErr != nil { + os.RemoveAll(dir) + } + }() + + rootPerms := defaultPerms + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootPerms = os.FileMode(st.Mode()) + rootIDs.UID = int(st.UID()) + rootIDs.GID = int(st.GID()) + } + if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent != "" { + parentDir, err := d.Get(parent, graphdriver.MountOpts{}) + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := dirCopy(parentDir, dir); err != nil { + return err + } + } + + return nil + +} + +func (d *Driver) dir(id string) string { + for i, home := range d.homes { + if i > 0 { + home = filepath.Join(home, d.String()) + } + candidate := filepath.Join(home, "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } + } + return filepath.Join(d.homes[0], "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + return system.EnsureRemoveAll(d.dir(id)) +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + dir := d.dir(id) + switch len(options.Options) { + case 0: + case 1: + if options.Options[0] == "ro" { + // ignore "ro" option + break + } + fallthrough + default: + return "", fmt.Errorf("vfs driver does not support mount options") + } + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. 
+func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For VFS, it queries the directory for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.dir(id)) +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + if len(d.homes) > 1 { + return d.homes[1:] + } + return nil +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (d *Driver) SupportsShifting() bool { + return d.updater.SupportsShifting() +} + +// UpdateLayerIDMap updates ID mappings in a from matching the ones specified +// by toContainer to those specified by toHost. +func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
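+//
+// vfs delegates this to the wrapped NaiveDiffDriver, which enumerates the
+// changes between the two directory trees and sums their sizes.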
+func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) +} diff --git a/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go b/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go new file mode 100644 index 00000000000..fa141263019 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go @@ -0,0 +1,5 @@ +package windows + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go new file mode 100644 index 00000000000..1491517413c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -0,0 +1,1006 @@ +//+build windows + +package windows + +import ( + "archive/tar" + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/longpath" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. 
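+//
+// A minimal usage sketch (the home path is hypothetical). Note that this
+// driver accepts no driver options and refuses homes on ReFS volumes:
+//
+//	d, err := InitFilter(`C:\ProgramData\containers\windowsfilter`, graphdriver.Options{})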
+func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + for _, option := range options.DriverOptions { + if strings.HasPrefix(option, "windows.mountopt=") { + return nil, fmt.Errorf("windows driver does not support mount options") + } else { + return nil, fmt.Errorf("option %s not supported", option) + } + } + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = windows.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = windows.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. 
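+// Unlike vfs, this driver has no native layer-copy primitive, so it defers
+// to graphdriver.NaiveCreateFromTemplate, which creates the layer and then
+// replays the template's diff on top of it.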
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. 
+ // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel) + var dir string + + switch len(options.Options) { + case 0: + case 1: + if options.Options[0] == "ro" { + // ignore "ro" option + break + } + fallthrough + default: + return "", fmt.Errorf("windows driver does not support mount options") + } + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. 
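+	// The calls below follow the HCS layer lifecycle: resolve the layer
+	// chain, ActivateLayer, then PrepareLayer to make it mountable; each
+	// failure path decrements the refcount taken above and unwinds any
+	// prior activate/prepare step.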
+ layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", id, err) + } + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For VFS, it queries the directory for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.dir(id)) +} + +// Put adds a new layer to the driver. +func (d *Driver) Put(id string) error { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + _, exists := d.cache[rID] + delete(d.cache, rID) + d.cacheMu.Unlock() + + // If the cache was not populated, then the layer was left unprepared and deactivated + if !exists { + return nil + } + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + items, err := ioutil.ReadDir(d.info.HomeDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { + logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) + } else { + logrus.Infof("Cleaned up %s", item.Name()) + } + } + } + + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
+// The layer should be mounted when calling this function +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ io.ReadCloser, err error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return + } + + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + // this is assuming that the layer is unmounted + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return nil, err + } + prepare := func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + } + } + + arch, err := d.exportLayer(rID, layerChain) + if err != nil { + prepare() + return + } + return ioutils.NewReadCloserWrapper(arch, func() error { + err := arch.Close() + prepare() + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (int64, error) { + panicIfUsedByLcow() + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, options.Diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
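+//
+// It computes Changes against the resolved parent, mounts the layer via Get
+// to size those changes, and releases the mount with Put before returning.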
+func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + panicIfUsedByLcow() + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, idMappings, rPId, parentMappings, mountLabel) + if err != nil { + return + } + + layerFs, err := d.Get(id, graphdriver.MountOpts{}) + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// Metadata returns custom driver information. +func (d *Driver) Metadata(id string) (map[string]string, error) { + panicIfUsedByLcow() + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. 
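+//
+// This is what makes the mutatedFiles table above effective: the import
+// process mutates the BCD files under UtilityVM/Files, so a copy of each
+// such file, exactly as it appeared in the tar stream, is kept in the layer
+// directory.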
+func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. 
+ // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. + panic("Failed to disabled process privileges while in non re-exec mode") + } + }() + } + + info := hcsshim.DriverInfo{ + Flavour: filterDriver, + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) + if err != nil { + return 0, err + } + + size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) + if err != nil { + return 0, err + } + + err = w.Close() + if err != nil { + return 0, err + } + + return size, nil +} + +// resolveID computes the layerID information based on the given id. +func (d *Driver) resolveID(id string) (string, error) { + content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + if os.IsNotExist(err) { + return id, nil + } else if err != nil { + return "", err + } + return string(content), nil +} + +// setID stores the layerId in disk. +func (d *Driver) setID(id, altID string) error { + return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) +} + +// getLayerChain returns the layer chain information. +func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + } + + return layerChain, nil +} + +// setLayerChain stores the layer chain information in disk. +func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("Failed to marshall layerchain json - %s", err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("Unable to write layerchain file - %s", err) + } + + return nil +} + +type fileGetCloserWithBackupPrivileges struct { + path string +} + +func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { + if backupPath, ok := mutatedFiles[filename]; ok { + return os.Open(filepath.Join(fg.path, backupPath)) + } + + var f *os.File + // Open the file while holding the Windows backup privilege. This ensures that the + // file can be opened even if the caller does not actually have access to it according + // to the security descriptor. Also use sequential file access to avoid depleting the + // standby list - Microsoft VSO Bug Tracker #9900466 + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + path := longpath.AddPrefix(filepath.Join(fg.path, filename)) + p, err := windows.UTF16FromString(path) + if err != nil { + return err + } + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) + if err != nil { + return &os.PathError{Op: "open", Path: path, Err: err} + } + f = os.NewFile(uintptr(h), path) + return nil + }) + return f, err +} + +func (fg *fileGetCloserWithBackupPrivileges) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. 
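+//
+// For paths listed in mutatedFiles, the returned getter serves the backup
+// copy saved at import time instead of the since-mutated on-disk file.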
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + panicIfUsedByLcow() + id, err := d.resolveID(id) + if err != nil { + return nil, err + } + + return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} + +// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from +// matching those in toContainer to matching those in toHost. +func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return fmt.Errorf("windows doesn't support changing ID mappings") +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (d *Driver) SupportsShifting() bool { + return false +} + +type storageOptions struct { + size uint64 +} + +func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { + options := storageOptions{} + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + options.size = uint64(size) + default: + return nil, fmt.Errorf("Unknown storage option: %s", key) + } + } + return &options, nil +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS new file mode 100644 index 00000000000..9c270c541f5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go new file mode 100644 index 00000000000..e034bf152c9 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -0,0 +1,512 @@ +// +build linux freebsd + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "time" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/mistifyio/go-zfs" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +type zfsOptions struct { + fsName string + mountPath string + mountOptions string +} + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.WithField("storage-driver", "zfs").Debugf("%s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. +// It takes base mount path and an array of options which are represented as key value pairs. +// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. 
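+//
+// A minimal usage sketch (the base path and dataset name are hypothetical):
+//
+//	d, err := Init("/var/lib/containers/storage/zfs",
+//		graphdriver.Options{DriverOptions: []string{"zfs.fsname=tank/containers"}})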
+func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { + var err error + + logger := logrus.WithField("storage-driver", "zfs") + + if _, err := exec.LookPath("zfs"); err != nil { + logger.Debugf("zfs command is not available: %v", err) + return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available") + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) + if err != nil { + logger.Debugf("cannot open /dev/zfs: %v", err) + return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err) + } + defer file.Close() + + options, err := parseOptions(opt.DriverOptions) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/gid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: opt.UIDMaps, + gidMaps: opt.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + case "zfs.mountopt": + options.mountOptions = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := unix.Stat(m.Mountpoint, &stat); err != nil { + logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.FSType == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. 
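+// filesystemsCache records the datasets discovered at Init plus those
+// created since, so existence checks (see Exists below) need not shell out
+// to the zfs command.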
+type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is called on when program exits, it is a no-op for ZFS. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// Metadata returns image/container metadata related to graph driver +func (d *Driver) Metadata(id string) (map[string]string, error) { + return map[string]string{ + "Mountpoint": d.mountPath(id), + "Dataset": d.zfsPath(id), + }, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
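+// Layers map to ZFS datasets: a child layer is produced by snapshotting the
+// parent's dataset and cloning that snapshot (see cloneFilesystem above), so
+// creation is copy-on-write rather than a file-by-file copy.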
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + err := d.create(id, parent, opts) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, opts) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + name := d.zfsPath(id) + mountpoint := d.mountPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + var rootUID, rootGID int + var mountLabel string + if opts != nil { + rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) + if err != nil { + return fmt.Errorf("Failed to get root uid/gid: %v", err) + } + mountLabel = opts.MountLabel + } + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + + if err := idtools.MkdirAllAs(mountpoint, defaultPerms, rootUID, rootGID); err != nil { + return err + } + defer func() { + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + }() + + mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel) + + if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil { + return errors.Wrap(err, "error creating zfs mount") + } + defer func() { + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + } + }() + + if err := os.Chmod(mountpoint, defaultPerms); err != nil { + return errors.Wrap(err, "error setting permissions on zfs mount") + } + + // this is our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint) + } + + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. 
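+//
+// The destroy is recursive (zfs destroy -r), so the layer's snapshots and
+// child datasets are removed along with it.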
+func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mountpoint); c <= 0 { + if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { + logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) + } + if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) + } + + } + } + }() + + // In the case of a read-only mount we first mount read-write so we can set the + // correct permissions on the mount point and remount read-only afterwards. + remountReadOnly := false + mountOptions := d.options.mountOptions + if len(options.Options) > 0 { + var newOptions []string + for _, option := range options.Options { + if option == "ro" { + // Filter out read-only mount option but remember for later remounting. + remountReadOnly = true + } else { + newOptions = append(newOptions, option) + } + } + mountOptions = strings.Join(newOptions, ",") + } + + filesystem := d.zfsPath(id) + opts := label.FormatMountLabel(mountOptions, options.MountLabel) + logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", errors.Wrap(err, "error creating zfs mount") + } + + if remountReadOnly { + opts = label.FormatMountLabel("remount,ro", options.MountLabel) + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", errors.Wrap(err, "error remounting zfs mount read-only") + } + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + + logger := logrus.WithField("storage-driver", "zfs") + + logger.Debugf(`unmount("%s")`, mountpoint) + + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + } + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + + return nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For ZFS, it queries the full mount path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.mountPath(id)) +} + +// Exists checks to see if the cache entry exists for the given id. 
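+//
+// Only the in-memory filesystemsCache is consulted, so datasets created or
+// destroyed outside this driver instance are not reflected here.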
+func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go new file mode 100644 index 00000000000..bf690515984 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -0,0 +1,39 @@ +package zfs + +import ( + "fmt" + "strings" + + "github.com/containers/storage/drivers" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootdir string) error { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go new file mode 100644 index 00000000000..edcb1da36b7 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -0,0 +1,29 @@ +package zfs + +import ( + graphdriver "github.com/containers/storage/drivers" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func checkRootdirFs(rootDir string) error { + fsMagic, err := graphdriver.GetFSMagic(rootDir) + if err != nil { + return err + } + backingFS := "unknown" + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFS = fsName + } + + if fsMagic != graphdriver.FsMagicZfs { + logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir) + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go new file mode 100644 index 00000000000..643b169bc5c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go new file mode 100644 index 00000000000..6deea0c829d --- /dev/null +++ b/vendor/github.com/containers/storage/errors.go @@ -0,0 +1,63 @@ +package storage + +import ( + "errors" + + "github.com/containers/storage/types" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the 
specified name or ID. + ErrContainerUnknown = types.ErrContainerUnknown + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = types.ErrDigestUnknown + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = types.ErrDuplicateID + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = types.ErrDuplicateImageNames + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = types.ErrDuplicateLayerNames + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = types.ErrDuplicateName + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = types.ErrImageUnknown + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = types.ErrImageUsedByContainer + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = types.ErrIncompleteOptions + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = types.ErrInvalidBigDataName + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = types.ErrLayerHasChildren + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. + ErrLayerNotMounted = types.ErrLayerNotMounted + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = types.ErrLayerUnknown + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = types.ErrLayerUsedByContainer + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = types.ErrLayerUsedByImage + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = types.ErrLoadError + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = types.ErrNotAContainer + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = types.ErrNotALayer + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = types.ErrNotAnID + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = types.ErrNotAnImage + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. + ErrParentIsContainer = types.ErrParentIsContainer + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. + ErrParentUnknown = types.ErrParentUnknown + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. + ErrSizeUnknown = types.ErrSizeUnknown + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. 
+ ErrStoreIsReadOnly = types.ErrStoreIsReadOnly + // ErrNotSupported is returned when the requested functionality is not supported. + ErrNotSupported = types.ErrNotSupported + // ErrInvalidNameOperation is returned when updateName is called with invalid operation. + // Internal error + errInvalidUpdateNameOperation = errors.New("invalid update name operation") +) diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod new file mode 100644 index 00000000000..a2aff490248 --- /dev/null +++ b/vendor/github.com/containers/storage/go.mod @@ -0,0 +1,34 @@ +go 1.14 + +module github.com/containers/storage + +require ( + github.com/BurntSushi/toml v1.0.0 + github.com/Microsoft/go-winio v0.5.1 + github.com/Microsoft/hcsshim v0.9.2 + github.com/containerd/stargz-snapshotter/estargz v0.11.0 + github.com/cyphar/filepath-securejoin v0.2.3 + github.com/docker/go-units v0.4.0 + github.com/google/go-intervals v0.0.2 + github.com/hashicorp/go-multierror v1.1.1 + github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.14.2 + github.com/klauspost/pgzip v1.2.5 + github.com/mattn/go-shellwords v1.0.12 + github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible + github.com/moby/sys/mountinfo v0.5.0 + github.com/opencontainers/go-digest v1.0.0 + github.com/opencontainers/runc v1.1.0 + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 + github.com/opencontainers/selinux v1.10.0 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.8.1 + github.com/stretchr/testify v1.7.0 + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 + github.com/tchap/go-patricia v2.3.0+incompatible + github.com/ulikunitz/xz v0.5.10 + github.com/vbatts/tar-split v0.11.2 + golang.org/x/net v0.0.0-20210825183410-e898025ed96a + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e + gotest.tools v2.2.0+incompatible +) diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum new file mode 100644 index 00000000000..b211efd3723 --- /dev/null +++ b/vendor/github.com/containers/storage/go.sum @@ -0,0 +1,1069 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod 
h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod 
h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven 
v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod 
h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod 
h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/stargz-snapshotter/estargz v0.11.0 h1:t0IW5kOmY7AXDAWRUs2uVzDhijAUOAYVr/dyRhOQvBg= +github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= 
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket 
v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go 
v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= 
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod 
h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify 
v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e 
h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod 
h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client 
v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go new file mode 100644 index 00000000000..f870b9ceed3 --- /dev/null +++ b/vendor/github.com/containers/storage/idset.go @@ -0,0 +1,220 @@ +package storage + +import ( + "github.com/containers/storage/pkg/idtools" + "github.com/google/go-intervals/intervalset" + "github.com/pkg/errors" +) + +// idSet represents a set of integer IDs. It is stored as an ordered set of intervals. +type idSet struct { + set *intervalset.ImmutableSet +} + +func newIDSet(intervals []interval) *idSet { + s := intervalset.Empty() + for _, i := range intervals { + s.Add(intervalset.NewSet([]intervalset.Interval{i})) + } + return &idSet{set: s.ImmutableSet()} +} + +// getHostIDs returns all the host ids in the id map. +func getHostIDs(idMaps []idtools.IDMap) *idSet { + var intervals []interval + for _, m := range idMaps { + intervals = append(intervals, interval{start: m.HostID, end: m.HostID + m.Size}) + } + return newIDSet(intervals) +} + +// getContainerIDs returns all the container ids in the id map. +func getContainerIDs(idMaps []idtools.IDMap) *idSet { + var intervals []interval + for _, m := range idMaps { + intervals = append(intervals, interval{start: m.ContainerID, end: m.ContainerID + m.Size}) + } + return newIDSet(intervals) +} + +// subtract returns the subtraction of `s` and `t`. `s` and `t` are unchanged. +func (s *idSet) subtract(t *idSet) *idSet { + if s == nil || t == nil { + return s + } + return &idSet{set: s.set.Sub(t.set)} +} + +// union returns the union of `s` and `t`. `s` and `t` are unchanged. +func (s *idSet) union(t *idSet) *idSet { + if s == nil { + return t + } else if t == nil { + return s + } + return &idSet{set: s.set.Union(t.set)} +} + +// Methods to iterate over the intervals of the idSet. intervalset doesn't provide one :-( + +// iterator to idSet. Returns nil if iteration finishes. +type iteratorFn func() *interval + +// cancelFn must be called exactly once unless iteratorFn returns nil, otherwise go routine might +// leak. +type cancelFn func() + +func (s *idSet) iterator() (iteratorFn, cancelFn) { + if s == nil { + return func() *interval { return nil }, func() {} + } + cancelCh := make(chan byte) + dataCh := make(chan interval) + go func() { + s.set.Intervals(func(ii intervalset.Interval) bool { + select { + case <-cancelCh: + return false + case dataCh <- ii.(interval): + return true + } + }) + close(dataCh) + }() + iterator := func() *interval { + i, ok := <-dataCh + if !ok { + return nil + } + return &i + } + return iterator, func() { close(cancelCh) } +} + +// size returns the total number of ids in the ID set. +func (s *idSet) size() int { + var size int + iterator, cancel := s.iterator() + defer cancel() + for i := iterator(); i != nil; i = iterator() { + size += i.length() + } + return size +} + +// findAvailable finds the `n` ids from `s`. 
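+// For example, with s = {[0, 3), [10, 15)} and n = 5 it returns {[0, 3), [10, 12)};
+// if s holds fewer than n ids in total, it returns an error. An illustrative use of
+// the surrounding helpers (a hedged sketch; the variable names here are hypothetical):
+//
+//	avail := getHostIDs(allMaps).subtract(alreadyUsed)
+//	picked, err := avail.findAvailable(n)
+//	mappings := picked.zip(containerIDs)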
+func (s *idSet) findAvailable(n int) (*idSet, error) { + var intervals []intervalset.Interval + iterator, cancel := s.iterator() + defer cancel() + for i := iterator(); n > 0 && i != nil; i = iterator() { + i.end = minInt(i.end, i.start+n) + intervals = append(intervals, *i) + n -= i.length() + } + if n > 0 { + return nil, errors.New("could not find enough available IDs") + } + return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil +} + +// zip creates an id map from `s` (host ids) and container ids. +func (s *idSet) zip(container *idSet) []idtools.IDMap { + hostIterator, hostCancel := s.iterator() + defer hostCancel() + containerIterator, containerCancel := container.iterator() + defer containerCancel() + var out []idtools.IDMap + for h, c := hostIterator(), containerIterator(); h != nil && c != nil; { + if n := minInt(h.length(), c.length()); n > 0 { + out = append(out, idtools.IDMap{ + ContainerID: c.start, + HostID: h.start, + Size: n, + }) + h.start += n + c.start += n + } + if h.IsZero() { + h = hostIterator() + } + if c.IsZero() { + c = containerIterator() + } + } + return out +} + +// interval represents an interval of integers [start, end). Note it is allowed to have +// start >= end, in which case it is treated as an empty interval. It implements interface +// intervalset.Interval. +type interval struct { + // Start of the interval (inclusive). + start int + // End of the interval (exclusive). + end int +} + +func (i interval) length() int { + return maxInt(0, i.end-i.start) +} + +func (i interval) Intersect(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)} +} + +func (i interval) Before(other intervalset.Interval) bool { + j := other.(interval) + return !i.IsZero() && !j.IsZero() && i.end < j.start +} + +func (i interval) IsZero() bool { + return i.length() <= 0 +} + +func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, intervalset.Interval) { + j := other.(interval) + if j.IsZero() { + return i, interval{} + } + // Subtracting [j.start, j.end) is equivalent to the union of intersecting (-inf, j.start) and + // [j.end, +inf). 
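+ // For example, bisecting [0, 10) by [3, 5) yields left = [0, 3) and right = [5, 10),
+ // while bisecting [0, 10) by [8, 20) yields left = [0, 8) and an empty right interval.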
+ left := interval{start: i.start, end: minInt(i.end, j.start)} + right := interval{start: maxInt(i.start, j.end), end: i.end} + return left, right +} + +func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) { + return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + } + return interval{} +} + +func (i interval) Encompass(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + switch { + case i.IsZero(): + return j + case j.IsZero(): + return i + default: + return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + } +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go new file mode 100644 index 00000000000..ea76e43f9c1 --- /dev/null +++ b/vendor/github.com/containers/storage/images.go @@ -0,0 +1,843 @@ +package storage + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const ( + // ImageDigestManifestBigDataNamePrefix is a prefix of big data item + // names which we consider to be manifests, used for computing a + // "digest" value for the image as a whole, by which we can locate the + // image later. + ImageDigestManifestBigDataNamePrefix = "manifest" + // ImageDigestBigDataKey is provided for compatibility with older + // versions of the image library. It will be removed in the future. + ImageDigestBigDataKey = "manifest" +) + +// An Image is a reference to a layer and an associated metadata string. +type Image struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Digest is a digest value that we can use to locate the image, if one + // was specified at creation-time. + Digest digest.Digest `json:"digest,omitempty"` + + // Digests is a list of digest values of the image's manifests, and + // possibly a manually-specified value, that we can use to locate the + // image. If Digest is set, its value is also in this list. + Digests []digest.Digest `json:"-"` + + // Names is an optional set of user-defined convenience values. The + // image can be referred to by its ID or any of its names. Names are + // unique among images, and are often the text representation of tagged + // or canonical references. + Names []string `json:"names,omitempty"` + + // NamesHistory is an optional set of Names the image had in the past. The + // contained names are free from any duplicates, whereas the newest entry + // is the first one. + NamesHistory []string `json:"names-history,omitempty"` + + // TopLayer is the ID of the topmost layer of the image itself, if the + // image contains one or more layers. Multiple images can refer to the + // same top layer. + TopLayer string `json:"layer,omitempty"` + + // MappedTopLayers are the IDs of alternate versions of the top layer + // which have the same contents and parent, and which differ from + // TopLayer only in which ID mappings they use. 
When the image is + // to be removed, they should be removed before the TopLayer, as the + // graph driver may depend on that. + MappedTopLayers []string `json:"mapped-layers,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this image was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // ReadOnly is true if this image resides in a read-only layer store. + ReadOnly bool `json:"-"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ROImageStore provides bookkeeping for information about Images. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore + + // Exists checks if there is an image with the given ID or name. + Exists(id string) bool + + // Get retrieves information about an image given an ID or name. + Get(id string) (*Image, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Images returns a slice enumerating the known images. + Images() ([]Image, error) + + // ByDigest returns a slice enumerating the images which have either an + // explicitly-set digest, or a big data item with a name that starts + // with ImageDigestManifestBigDataNamePrefix, which matches the + // specified digest. + ByDigest(d digest.Digest) ([]*Image, error) +} + +// ImageStore provides bookkeeping for information about Images. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWImageBigDataStore + FlaggableStore + + // Create creates an image that has a specified ID (or a random one) and + // optional names, using the specified layer as its topmost (hopefully + // read-only) layer. That layer can be referenced by multiple images. + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) + + // SetNames replaces the list of names associated with an image with the + // supplied values. The values are expected to be valid normalized + // named image references. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the supplied values to the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the image with + // the specified id. 
The values are expected to be valid normalized + // named image references. + RemoveNames(id string, names []string) error + + // Delete removes the record of the image. + Delete(id string) error + + // Wipe removes records of all images. + Wipe() error +} + +type imageStore struct { + lockfile Locker + dir string + images []*Image + idindex *truncindex.TruncIndex + byid map[string]*Image + byname map[string]*Image + bydigest map[digest.Digest][]*Image + loadMut sync.Mutex +} + +func copyImage(i *Image) *Image { + return &Image{ + ID: i.ID, + Digest: i.Digest, + Digests: copyDigestSlice(i.Digests), + Names: copyStringSlice(i.Names), + NamesHistory: copyStringSlice(i.NamesHistory), + TopLayer: i.TopLayer, + MappedTopLayers: copyStringSlice(i.MappedTopLayers), + Metadata: i.Metadata, + BigDataNames: copyStringSlice(i.BigDataNames), + BigDataSizes: copyStringInt64Map(i.BigDataSizes), + BigDataDigests: copyStringDigestMap(i.BigDataDigests), + Created: i.Created, + ReadOnly: i.ReadOnly, + Flags: copyStringInterfaceMap(i.Flags), + } +} + +func copyImageSlice(slice []*Image) []*Image { + if len(slice) > 0 { + cp := make([]*Image, len(slice)) + for i := range slice { + cp[i] = copyImage(slice[i]) + } + return cp + } + return nil +} + +func (r *imageStore) Images() ([]Image, error) { + images := make([]Image, len(r.images)) + for i := range r.images { + images[i] = *copyImage(r.images[i]) + } + return images, nil +} + +func (r *imageStore) imagespath() string { + return filepath.Join(r.dir, "images.json") +} + +func (r *imageStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *imageStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +// bigDataNameIsManifest determines if a big data item with the specified name +// is considered to be representative of the image, in that its digest can be +// said to also be the image's digest. Currently, if its name is, or begins +// with, "manifest", we say that it is. +func bigDataNameIsManifest(name string) bool { + return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix) +} + +// recomputeDigests takes a fixed digest and a name-to-digest map and builds a +// list of the unique values that would identify the image. +func (i *Image) recomputeDigests() error { + validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1) + digests := make(map[digest.Digest]struct{}) + if i.Digest != "" { + if err := i.Digest.Validate(); err != nil { + return errors.Wrapf(err, "error validating image digest %q", string(i.Digest)) + } + digests[i.Digest] = struct{}{} + validDigests = append(validDigests, i.Digest) + } + for name, digest := range i.BigDataDigests { + if !bigDataNameIsManifest(name) { + continue + } + if digest.Validate() != nil { + return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name) + } + // Deduplicate the digest values. 
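+ // (The same manifest bytes can be stored under more than one big data name, so the
+ // same digest may legitimately be seen several times during this walk.)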
+ if _, known := digests[digest]; !known { + digests[digest] = struct{}{} + validDigests = append(validDigests, digest) + } + } + if i.Digest == "" && len(validDigests) > 0 { + i.Digest = validDigests[0] + } + i.Digests = validDigests + return nil +} + +func (r *imageStore) Load() error { + shouldSave := false + rpath := r.imagespath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + images := []*Image{} + idlist := []string{} + ids := make(map[string]*Image) + names := make(map[string]*Image) + digests := make(map[digest.Digest][]*Image) + if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(images)) + for n, image := range images { + ids[image.ID] = images[n] + idlist = append(idlist, image.ID) + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + shouldSave = true + } + } + // Compute the digest list. + err = image.recomputeDigests() + if err != nil { + return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names) + } + for _, name := range image.Names { + names[name] = image + } + for _, digest := range image.Digests { + list := digests[digest] + digests[digest] = append(list, image) + } + image.ReadOnly = !r.IsReadWrite() + } + } + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { + return ErrDuplicateImageNames + } + r.images = images + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.byname = names + r.bydigest = digests + if shouldSave { + return r.Save() + } + return nil +} + +func (r *imageStore) Save() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) + } + if !r.Locked() { + return errors.New("image store is not locked for writing") + } + rpath := r.imagespath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jdata, err := json.Marshal(&r.images) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newImageStore(dir string) (ImageStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func newROImageStore(dir string) (ROImageStore, error) { + lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.RLock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func (r *imageStore) lookup(id string) (*Image, bool) { + if image, ok := r.byid[id]; ok { + return image, ok + } else if image, ok := r.byname[id]; ok { + return image, ok + } else if longid, err := r.idindex.Get(id); err == nil { + image, ok := r.byid[longid] + return image, ok + } + return nil, false +} + +func (r *imageStore) ClearFlag(id string, flag 
string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + delete(image.Flags, flag) + return r.Save() +} + +func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + if image.Flags == nil { + image.Flags = make(map[string]interface{}) + } + image.Flags[flag] = value + return r.Save() +} + +func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { + if !r.IsReadWrite() { + return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id) + } + names = dedupeNames(names) + for _, name := range names { + if image, nameInUse := r.byname[name]; nameInUse { + return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID) + } + } + if created.IsZero() { + created = time.Now().UTC() + } + if err == nil { + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err := image.recomputeDigests() + if err != nil { + return nil, errors.Wrapf(err, "error validating digests for new image") + } + r.images = append(r.images, image) + r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) + } + err = r.Save() + image = copyImage(image) + } + return image, err +} + +func (r *imageStore) addMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + image.MappedTopLayers = append(image.MappedTopLayers, layer) + return r.Save() + } + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) removeMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + initialLen := len(image.MappedTopLayers) + image.MappedTopLayers = stringutils.RemoveFromSlice(image.MappedTopLayers, layer) + // No layer was removed. No need to save. 
+ if initialLen == len(image.MappedTopLayers) { + return nil + } + return r.Save() + } + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) Metadata(id string) (string, error) { + if image, ok := r.lookup(id); ok { + return image.Metadata, nil + } + return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath()) + } + if image, ok := r.lookup(id); ok { + image.Metadata = metadata + return r.Save() + } + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) removeName(image *Image, name string) { + image.Names = stringSliceWithoutValue(image.Names, name) +} + +func (i *Image) addNameToHistory(name string) { + i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. +func (r *imageStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *imageStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *imageStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) + } + r.byname[name] = image + image.addNameToHistory(name) + } + image.Names = names + return r.Save() +} + +func (r *imageStore) Delete(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + id = image.ID + toDeleteIndex := -1 + for i, candidate := range r.images { + if candidate.ID == id { + toDeleteIndex = i + } + } + delete(r.byid, id) + r.idindex.Delete(id) + for _, name := range image.Names { + delete(r.byname, name) + } + for _, digest := range image.Digests { + prunedList := imageSliceWithoutValue(r.bydigest[digest], image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + if toDeleteIndex != -1 { + // delete the image at toDeleteIndex + if toDeleteIndex == len(r.images)-1 { + r.images = r.images[:len(r.images)-1] + } else { + r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) 
+ } + } + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +func (r *imageStore) Get(id string) (*Image, error) { + if image, ok := r.lookup(id); ok { + return copyImage(image), nil + } + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) Lookup(name string) (id string, err error) { + if image, ok := r.lookup(name); ok { + return image.ID, nil + } + return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { + if images, ok := r.bydigest[d]; ok { + return copyImageSlice(images), nil + } + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d) +} + +func (r *imageStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") + } + image, ok := r.lookup(id) + if !ok { + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + return ioutil.ReadFile(r.datapath(image.ID, key)) +} + +func (r *imageStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") + } + image, ok := r.lookup(id) + if !ok { + return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } + if size, ok := image.BigDataSizes[key]; ok { + return size, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + return int64(len(data)), nil + } + return -1, ErrSizeUnknown +} + +func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") + } + image, ok := r.lookup(id) + if !ok { + return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := image.BigDataDigests[key]; ok { + return d, nil + } + return "", ErrDigestUnknown +} + +func (r *imageStore) BigDataNames(id string) ([]string, error) { + image, ok := r.lookup(id) + if !ok { + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + return copyStringSlice(image.BigDataNames), nil +} + +func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { + modified := make([]*Image, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + +func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") + } + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + err := os.MkdirAll(r.datadir(image.ID), 0700) + if err != nil { + return err + } + var newDigest digest.Digest + if bigDataNameIsManifest(key) 
{ + if digestManifest == nil { + return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided") + } + if newDigest, err = digestManifest(data); err != nil { + return errors.Wrapf(err, "error digesting manifest") + } + } else { + newDigest = digest.Canonical.FromBytes(data) + } + err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) + if err == nil { + save := false + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := image.BigDataSizes[key] + image.BigDataSizes[key] = int64(len(data)) + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := image.BigDataDigests[key] + image.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + save = true + } + addName := true + for _, name := range image.BigDataNames { + if name == key { + addName = false + break + } + } + if addName { + image.BigDataNames = append(image.BigDataNames, key) + save = true + } + for _, oldDigest := range image.Digests { + // remove the image from the list of images in the digest-based index + if list, ok := r.bydigest[oldDigest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, oldDigest) + } else { + r.bydigest[oldDigest] = prunedList + } + } + } + if err = image.recomputeDigests(); err != nil { + return errors.Wrapf(err, "error recomputing image digest information for %s", image.ID) + } + for _, newDigest := range image.Digests { + // add the image to the list of images in the digest-based index which + // corresponds to the new digest for this item, unless it's already there + list := r.bydigest[newDigest] + if len(list) == len(imageSliceWithoutValue(list, image)) { + // the list isn't shortened by trying to prune this image from it, + // so it's not in there yet + r.bydigest[newDigest] = append(list, image) + } + } + if save { + err = r.Save() + } + } + return err +} + +func (r *imageStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *imageStore) Lock() { + r.lockfile.Lock() +} + +func (r *imageStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *imageStore) RLock() { + r.lockfile.RLock() +} + +func (r *imageStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *imageStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *imageStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *imageStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *imageStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *imageStore) Locked() bool { + return r.lockfile.Locked() +} + +func (r *imageStore) ReloadIfChanged() error { + r.loadMut.Lock() + defer r.loadMut.Unlock() + + modified, err := r.Modified() + if err == nil && modified { + return r.Load() + } + return err +} diff --git a/vendor/github.com/containers/storage/jsoniter.go b/vendor/github.com/containers/storage/jsoniter.go new file mode 100644 index 00000000000..7dd6388d7f3 --- /dev/null +++ b/vendor/github.com/containers/storage/jsoniter.go @@ -0,0 +1,5 @@
+package storage + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go new file mode 100644 index 00000000000..07be394d1ef --- /dev/null +++ b/vendor/github.com/containers/storage/layers.go @@ -0,0 +1,1895 @@ +package storage + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/tarlog" + "github.com/containers/storage/pkg/truncindex" + multierror "github.com/hashicorp/go-multierror" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +const ( + tarSplitSuffix = ".tar-split.gz" + incompleteFlag = "incomplete" +) + +// A Layer is a record of a copy-on-write layer that's stored by the lower +// level graph driver. +type Layer struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // layer can be referred to by its ID or any of its names. Names are + // unique among layers. + Names []string `json:"names,omitempty"` + + // Parent is the ID of a layer from which this layer inherits data. + Parent string `json:"parent,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // MountLabel is an SELinux label which should be used when attempting to mount + // the layer. + MountLabel string `json:"mountlabel,omitempty"` + + // MountPoint is the path where the layer is mounted, or where it was most + // recently mounted. This can change between subsequent Unmount() and + // Mount() calls, so the caller should consult this value after Mount() + // succeeds to find the location of the container's root filesystem. + MountPoint string `json:"-"` + + // MountCount is used as a reference count for the container's layer being + // mounted at the mount point. + MountCount int `json:"-"` + + // Created is the datestamp for when this layer was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // CompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or Put(), as it was presented to us. + CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"` + + // CompressedSize is the length of the blob that was last passed to + // ApplyDiff() or Put(), as it was presented to us. 
If + // CompressedDigest is not set, this should be treated as if it were an + // uninitialized value. + CompressedSize int64 `json:"compressed-size,omitempty"` + + // UncompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or Put(), after we decompressed it. Often referred to + // as a DiffID. + UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` + + // UncompressedSize is the length of the blob that was last passed to + // ApplyDiff() or Put(), after we decompressed it. If + // UncompressedDigest is not set, this should be treated as if it were + // an uninitialized value. + UncompressedSize int64 `json:"diff-size,omitempty"` + + // CompressionType is the type of compression which we detected on the blob + // that was last passed to ApplyDiff() or Put(). + CompressionType archive.Compression `json:"compression,omitempty"` + + // UIDs and GIDs are lists of UIDs and GIDs used in the layer. This + // field is only populated (i.e., will only contain one or more + // entries) if the layer was created using ApplyDiff() or Put(). + UIDs []uint32 `json:"uidset,omitempty"` + GIDs []uint32 `json:"gidset,omitempty"` + + // Flags is arbitrary data about the layer. + Flags map[string]interface{} `json:"flags,omitempty"` + + // UIDMap and GIDMap are used for setting up a layer's contents + // for use inside of a user namespace where UID mapping is being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + + // ReadOnly is true if this layer resides in a read-only layer store. + ReadOnly bool `json:"-"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` +} + +type layerMountPoint struct { + ID string `json:"id"` + MountPoint string `json:"path"` + MountCount int `json:"count"` +} + +// DiffOptions overrides the default behavior of Diff() methods. +type DiffOptions struct { + // Compression, if set, overrides the default compressor when generating a diff. + Compression *archive.Compression +} + +// ROLayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type ROLayerStore interface { + ROFileBasedStore + ROMetadataStore + ROLayerBigDataStore + + // Exists checks if a layer with the specified name or ID is known. + Exists(id string) bool + + // Get retrieves information about a layer given an ID or name. + Get(id string) (*Layer, error) + + // Status returns a slice of key-value pairs, suitable for human consumption, + // relaying whatever status information the underlying driver can share. + Status() ([][2]string, error) + + // Changes returns a slice of Change structures, which contain a pathname + // (Path) and a description of what sort of change (Kind) was made by the + // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a + // specified layer. By default, the layer's parent is used as a reference. + Changes(from, to string) ([]archive.Change, error) + + // Diff produces a tarstream which can be applied to a layer with the contents + // of the first layer to produce a layer with the contents of the second layer. + // By default, the parent of the second layer is used as the first + // layer, so it need not be specified.
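+ // For example, Diff("", id, nil) streams the changes that layer id
+ // makes relative to its parent.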
Options can be used to override + default behavior, but are also not required. + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + + // DiffSize produces an estimate of the length of the tarstream which would be + // produced by Diff. + DiffSize(from, to string) (int64, error) + + // Size produces a cached value for the uncompressed size of the layer, + // if one is known, or -1 if it is not known. If the layer cannot be + // found, it returns an error. + Size(name string) (int64, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // Layers returns a slice of the known layers. + Layers() ([]Layer, error) +} + +// LayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type LayerStore interface { + ROLayerStore + RWFileBasedStore + RWMetadataStore + FlaggableStore + RWLayerBigDataStore + + // Create creates a new layer, optionally giving it a specified ID rather than + // a randomly-generated one, either inheriting data from another specified + // layer or the empty base layer. The new layer can optionally be given names + // and have an SELinux label specified for use when mounting it. Some + // underlying drivers can accept a "size" option. At this time, most + // underlying drivers do not themselves distinguish between writeable + // and read-only layers. + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) + + // CreateWithFlags combines the functions of Create and SetFlag. + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + + // Put combines the functions of CreateWithFlags and ApplyDiff. + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + + // SetNames replaces the list of names associated with a layer with the + // supplied values. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the supplied values to the list of names associated with the layer with the + // specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the layer with the + // specified id. + RemoveNames(id string, names []string) error + + // Delete deletes a layer with the specified name or ID. + Delete(id string) error + + // Wipe deletes all layers. + Wipe() error + + // Mount mounts a layer for use. If the specified layer is the parent of other + // layers, it should not be written to. An SELinux label to be applied to the + // mount can be specified to override the one configured for the layer.
+ // The mappings used by the container can be specified. + Mount(id string, options drivers.MountOpts) (string, error) + + // Unmount unmounts a layer when it is no longer in use. + Unmount(id string, force bool) (bool, error) + + // Mounted returns the number of times the layer has been mounted. + Mounted(id string) (int, error) + + // ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint + // for which the layer's UID and GID maps don't contain corresponding entries. + ParentOwners(id string) (uids, gids []int, err error) + + // ApplyDiff reads a tarstream which was created by a previous call to Diff and + // applies its changes to a specified layer. + ApplyDiff(to string, diff io.Reader) (int64, error) + + // ApplyDiffWithDiffer applies the changes through the differ callback function. + // If to is the empty string, then a staging directory is created by the driver. + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + + // CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors. + CleanupStagingDirectory(stagingDirectory string) error + + // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. + ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + + // DifferTarget gets the location where files are stored for the layer. + DifferTarget(id string) (string, error) + + // LoadLocked wraps Load in a locked state. This means it loads the store + // and cleans up invalid layers if needed. + LoadLocked() error + + // PutAdditionalLayer creates a layer using the diff contained in the additional layer + // store. + // This API is experimental and can be changed without bumping the major version number.
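+ // An illustrative call (the identifiers here are hypothetical, not part of this API):
+ //
+ //	layer, err := lstore.PutAdditionalLayer(stringid.GenerateRandomID(), parent, nil, aLayer)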
+ PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) +} + +type layerStore struct { + lockfile Locker + mountsLockfile Locker + rundir string + driver drivers.Driver + layerdir string + layers []*Layer + idindex *truncindex.TruncIndex + byid map[string]*Layer + byname map[string]*Layer + bymount map[string]*Layer + bycompressedsum map[digest.Digest][]string + byuncompressedsum map[digest.Digest][]string + uidMap []idtools.IDMap + gidMap []idtools.IDMap + loadMut sync.Mutex + layerspathModified time.Time +} + +func copyLayer(l *Layer) *Layer { + return &Layer{ + ID: l.ID, + Names: copyStringSlice(l.Names), + Parent: l.Parent, + Metadata: l.Metadata, + MountLabel: l.MountLabel, + MountPoint: l.MountPoint, + MountCount: l.MountCount, + Created: l.Created, + CompressedDigest: l.CompressedDigest, + CompressedSize: l.CompressedSize, + UncompressedDigest: l.UncompressedDigest, + UncompressedSize: l.UncompressedSize, + CompressionType: l.CompressionType, + ReadOnly: l.ReadOnly, + BigDataNames: copyStringSlice(l.BigDataNames), + Flags: copyStringInterfaceMap(l.Flags), + UIDMap: copyIDMap(l.UIDMap), + GIDMap: copyIDMap(l.GIDMap), + UIDs: copyUint32Slice(l.UIDs), + GIDs: copyUint32Slice(l.GIDs), + } +} + +func (r *layerStore) Layers() ([]Layer, error) { + layers := make([]Layer, len(r.layers)) + for i := range r.layers { + layers[i] = *copyLayer(r.layers[i]) + } + return layers, nil +} + +func (r *layerStore) mountspath() string { + return filepath.Join(r.rundir, "mountpoints.json") +} + +func (r *layerStore) layerspath() string { + return filepath.Join(r.layerdir, "layers.json") +} + +func (r *layerStore) Load() error { + shouldSave := false + rpath := r.layerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layers := []*Layer{} + idlist := []string{} + ids := make(map[string]*Layer) + names := make(map[string]*Layer) + compressedsums := make(map[digest.Digest][]string) + uncompressedsums := make(map[digest.Digest][]string) + if r.IsReadWrite() { + label.ClearLabels() + } + if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(layers)) + for n, layer := range layers { + ids[layer.ID] = layers[n] + idlist = append(idlist, layer.ID) + for _, name := range layer.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + shouldSave = true + } + names[name] = layers[n] + } + if layer.CompressedDigest != "" { + compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) + } + if layer.MountLabel != "" { + label.ReserveLabel(layer.MountLabel) + } + layer.ReadOnly = !r.IsReadWrite() + } + err = nil + } + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { + return ErrDuplicateLayerNames + } + r.layers = layers + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.byname = names + r.bycompressedsum = compressedsums + r.byuncompressedsum = uncompressedsums + + // Load and merge information about which layers are mounted, and where. 
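+ // (Mount information lives apart from layers.json, in mountpoints.json under
+ // rundir with its own lockfile, because mounts are runtime state rather than
+ // persistent layer metadata.)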
+ if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if err = r.loadMounts(); err != nil { + return err + } + + // Last step: as we’re writable, try to remove anything that a previous + // user of this storage area marked for deletion but didn't manage to + // actually delete. + if r.Locked() { + for _, layer := range r.layers { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + if cleanup, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := cleanup.(bool); ok && b { + err = r.deleteInternal(layer.ID) + if err != nil { + break + } + shouldSave = true + } + } + } + } + if shouldSave { + return r.saveLayers() + } + } + + return err +} + +func (r *layerStore) LoadLocked() error { + r.lockfile.Lock() + defer r.lockfile.Unlock() + return r.Load() +} + +func (r *layerStore) loadMounts() error { + mounts := make(map[string]*Layer) + mpath := r.mountspath() + data, err := ioutil.ReadFile(mpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layerMounts := []layerMountPoint{} + if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { + // Clear all of our mount information. If another process + // unmounted something, it (along with its zero count) won't + // have been encoded into the version of mountpoints.json that + // we're loading, so our count could fall out of sync with it + // if we don't, and if we subsequently change something else, + // we'd pass that error along to other process that reloaded + // the data after we saved it. + for _, layer := range r.layers { + layer.MountPoint = "" + layer.MountCount = 0 + } + // All of the non-zero count values will have been encoded, so + // we reset the still-mounted ones based on the contents. + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := r.lookup(mount.ID); ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount + } + } + } + err = nil + } + r.bymount = mounts + return err +} + +func (r *layerStore) Save() error { + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + defer r.mountsLockfile.Touch() + if err := r.saveLayers(); err != nil { + return err + } + return r.saveMounts() +} + +func (r *layerStore) saveLayers() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.Locked() { + return errors.New("layer store is not locked for writing") + } + rpath := r.layerspath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jldata, err := json.Marshal(&r.layers) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jldata, 0600) +} + +func (r *layerStore) saveMounts() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.mountsLockfile.Locked() { + return errors.New("layer store mount information is not locked for writing") + } + mpath := r.mountspath() + if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { + return err + } + mounts := make([]layerMountPoint, 0, len(r.layers)) + for _, layer := range r.layers { + if layer.MountPoint != "" && layer.MountCount > 0 { + mounts = append(mounts, layerMountPoint{ + ID: layer.ID, + MountPoint: layer.MountPoint, + MountCount: layer.MountCount, + }) + } + } + jmdata, err := json.Marshal(&mounts) + if err != nil { + return err + } + if err = 
ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { + return err + } + return r.loadMounts() +} + +func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { + if err := os.MkdirAll(rundir, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(layerdir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock")) + if err != nil { + return nil, err + } + rlstore := layerStore{ + lockfile: lockfile, + mountsLockfile: mountsLockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + uidMap: copyIDMap(s.uidMap), + gidMap: copyIDMap(s.gidMap), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { + lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + rlstore := layerStore{ + lockfile: lockfile, + mountsLockfile: nil, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func (r *layerStore) lookup(id string) (*Layer, bool) { + if layer, ok := r.byid[id]; ok { + return layer, ok + } else if layer, ok := r.byname[id]; ok { + return layer, ok + } else if longid, err := r.idindex.Get(id); err == nil { + layer, ok := r.byid[longid] + return layer, ok + } + return nil, false +} + +func (r *layerStore) Size(name string) (int64, error) { + layer, ok := r.lookup(name) + if !ok { + return -1, ErrLayerUnknown + } + // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that + // a zero value is not just present because it was never set to anything else (which can happen if the layer was + // created by a version of this library that didn't keep track of digest and size information). 
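+ // Concretely: a layer with a recorded digest and UncompressedSize == 0 really is
+ // empty, while a layer with no recorded digest reports -1, i.e. size unknown.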
+ if layer.UncompressedDigest != "" { + return layer.UncompressedSize, nil + } + return -1, nil +} + +func (r *layerStore) ClearFlag(id string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + delete(layer.Flags, flag) + return r.Save() +} + +func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[flag] = value + return r.Save() +} + +func (r *layerStore) Status() ([][2]string, error) { + return r.driver.Status(), nil +} + +func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) { + if duplicateLayer, idInUse := r.byid[id]; idInUse { + return duplicateLayer, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, ErrDuplicateName + } + } + + parent := "" + if parentLayer != nil { + parent = parentLayer.ID + } + + info, err := aLayer.Info() + if err != nil { + return nil, err + } + defer info.Close() + layer = &Layer{} + if err := json.NewDecoder(info).Decode(layer); err != nil { + return nil, err + } + layer.ID = id + layer.Parent = parent + layer.Created = time.Now().UTC() + + if err := aLayer.CreateAs(id, parent); err != nil { + return nil, err + } + + // TODO: check if necessary fields are filled + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { // names got from the additional layer store won't be used + r.byname[name] = layer + } + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } + if err := r.Save(); err != nil { + r.driver.Remove(id) + return nil, err + } + return copyLayer(layer), nil +} + +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { + if !r.IsReadWrite() { + return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) + } + size = -1 + if err := os.MkdirAll(r.rundir, 0700); err != nil { + return nil, -1, err + } + if err := os.MkdirAll(r.layerdir, 0700); err != nil { + return nil, -1, err + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if duplicateLayer, idInUse := r.byid[id]; idInUse { + return duplicateLayer, -1, ErrDuplicateID + } + names = dedupeNames(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, -1, ErrDuplicateName + } + } + parent := "" + if parentLayer != nil { + parent = parentLayer.ID + } + var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings + if moreOptions.TemplateLayer != "" { + templateLayer, ok :=
r.lookup(moreOptions.TemplateLayer) + if !ok { + return nil, -1, ErrLayerUnknown + } + templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + } else { + templateIDMappings = &idtools.IDMappings{} + } + if parentLayer != nil { + parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) + } else { + parentMappings = &idtools.IDMappings{} + } + if mountLabel != "" { + label.ReserveLabel(mountLabel) + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) + opts := drivers.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: options, + IDMappings: idMappings, + } + if moreOptions.TemplateLayer != "" { + if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + if id != "" { + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) + } + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer) + } + oldMappings = templateIDMappings + } else { + if writeable { + if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { + if id != "" { + return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) + } + return nil, -1, errors.Wrapf(err, "error creating read-write layer") + } + } else { + if err = r.driver.Create(id, parent, &opts); err != nil { + if id != "" { + return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) + } + return nil, -1, errors.Wrapf(err, "error creating layer") + } + } + oldMappings = parentMappings + } + if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { + if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + } + if err == nil { + layer = &Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Created: time.Now().UTC(), + Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), + BigDataNames: []string{}, + } + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { + r.byname[name] = layer + } + for flag, value := range flags { + layer.Flags[flag] = value + } + if diff != nil { + layer.Flags[incompleteFlag] = true + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) + if err != nil { + if r.Delete(layer.ID) != nil { + // Either a driver error or an error saving. + // We now have a layer that's been marked for + // deletion but which we failed to remove. + } + return nil, -1, err + } + delete(layer.Flags, incompleteFlag) + } + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. 
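+	// Aside: the diff path above uses a write-ahead "incomplete" marker —
+	// incompleteFlag is saved before the diff is applied and removed only
+	// once the apply succeeds, so a crash mid-apply leaves a record that can
+	// be recognized and cleaned up on a later load. A minimal, hedged sketch
+	// of the same idiom (hypothetical names, not part of the vendored file):
+	//
+	//	func applyWithMarker(flags map[string]interface{}, save, apply func() error) error {
+	//		flags["incomplete"] = true
+	//		if err := save(); err != nil {
+	//			return err
+	//		}
+	//		if err := apply(); err != nil {
+	//			return err // record stays flagged for later garbage collection
+	//		}
+	//		delete(flags, "incomplete")
+	//		return save()
+	//	}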
+ r.driver.Remove(id) + return nil, -1, err + } + layer = copyLayer(layer) + } + return layer, size, err +} + +func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { + layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil) + return layer, err +} + +func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) { + return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil) +} + +func (r *layerStore) Mounted(id string) (int, error) { + if !r.IsReadWrite() { + return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return 0, err + } + } + layer, ok := r.lookup(id) + if !ok { + return 0, ErrLayerUnknown + } + return layer.MountCount, nil +} + +func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { + + // check whether options include ro option + hasReadOnlyOpt := func(opts []string) bool { + for _, item := range opts { + if item == "ro" { + return true + } + } + return false + } + + // You are not allowed to mount layers from readonly stores if they + // are not mounted read/only. + if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) { + return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return "", err + } + } + defer r.mountsLockfile.Touch() + layer, ok := r.lookup(id) + if !ok { + return "", ErrLayerUnknown + } + if layer.MountCount > 0 { + mounted, err := mount.Mounted(layer.MountPoint) + if err != nil { + return "", err + } + // If the container is not mounted then we have a condition + // where the kernel umounted the mount point. This means + // that the mount count never got decremented. 
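+	// Aside: callers treat Mount/Unmount as a reference-counted pair — every
+	// successful Mount bumps MountCount and must eventually be matched by an
+	// Unmount so the count returns to zero. A hedged usage sketch (error
+	// handling elided; names as used elsewhere in this file):
+	//
+	//	mountpoint, err := rlstore.Mount(layerID, drivers.MountOpts{})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	defer rlstore.Unmount(layerID, false) // drop this reference when done
+	//	// ... read files under mountpoint while the reference is held ...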
+ if mounted { + layer.MountCount++ + return layer.MountPoint, r.saveMounts() + } + } + if options.MountLabel == "" { + options.MountLabel = layer.MountLabel + } + + if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() { + if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) { + return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID) + } + } + mountpoint, err := r.driver.Get(id, options) + if mountpoint != "" && err == nil { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountPoint = filepath.Clean(mountpoint) + layer.MountCount++ + r.bymount[layer.MountPoint] = layer + err = r.saveMounts() + } + return mountpoint, err +} + +func (r *layerStore) Unmount(id string, force bool) (bool, error) { + if !r.IsReadWrite() { + return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return false, err + } + } + defer r.mountsLockfile.Touch() + layer, ok := r.lookup(id) + if !ok { + layerByMount, ok := r.bymount[filepath.Clean(id)] + if !ok { + return false, ErrLayerUnknown + } + layer = layerByMount + } + if force { + layer.MountCount = 1 + } + if layer.MountCount > 1 { + layer.MountCount-- + return true, r.saveMounts() + } + err := r.driver.Put(id) + if err == nil || os.IsNotExist(err) { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountCount-- + layer.MountPoint = "" + return false, r.saveMounts() + } + return true, err +} + +func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { + if !r.IsReadWrite() { + return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return nil, nil, err + } + } + layer, ok := r.lookup(id) + if !ok { + return nil, nil, ErrLayerUnknown + } + if len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + // We're not using any mappings, so there aren't any unmapped IDs on parent directories. + return nil, nil, nil + } + if layer.MountPoint == "" { + // We don't know which directories to examine. 
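+	// Aside: the walk below asks, for every parent directory of the
+	// mountpoint, whether its owner maps into the layer's ID mappings;
+	// owners with no mapping are what ParentOwners reports. A hedged sketch
+	// of that mapping test (values hypothetical):
+	//
+	//	m := idtools.NewIDMappingsFromMaps(
+	//		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
+	//		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
+	//	)
+	//	if _, _, err := m.ToContainer(idtools.IDPair{UID: 0, GID: 100000}); err != nil {
+	//		// host UID 0 has no container mapping and would be reported in uids
+	//	}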
+ return nil, nil, ErrLayerNotMounted + } + rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID) + } + m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + fsuids := make(map[int]struct{}) + fsgids := make(map[int]struct{}) + for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + st, err := system.Stat(dir) + if err != nil { + return nil, nil, errors.Wrap(err, "read directory ownership") + } + lst, err := system.Lstat(dir) + if err != nil { + return nil, nil, err + } + fsuid := int(st.UID()) + fsgid := int(st.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + fsuid = int(lst.UID()) + fsgid = int(lst.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + } + for uid := range fsuids { + uids = append(uids, uid) + } + for gid := range fsgids { + gids = append(gids, gid) + } + if len(uids) > 1 { + sort.Ints(uids) + } + if len(gids) > 1 { + sort.Ints(gids) + } + return uids, gids, nil +} + +func (r *layerStore) removeName(layer *Layer, name string) { + layer.Names = stringSliceWithoutValue(layer.Names, name) +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. +func (r *layerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *layerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *layerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) + } + r.byname[name] = layer + } + layer.Names = names + return r.Save() +} + +func (r *layerStore) datadir(id string) string { + return filepath.Join(r.layerdir, id) +} + +func (r *layerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name") + } + layer, ok := r.lookup(id) + if !ok { + return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + } + return os.Open(r.datapath(layer.ID, key)) +} + +func (r *layerStore) SetBigData(id, key string, data io.Reader) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item") + } + if !r.IsReadWrite() { + return 
errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath())
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id)
+	}
+	err := os.MkdirAll(r.datadir(layer.ID), 0700)
+	if err != nil {
+		return err
+	}
+
+	// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
+	// BigData() relies on this behaviour when opening the file for read
+	// so that it is either accessing the old data or the new one.
+	writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
+	if err != nil {
+		return errors.Wrapf(err, "error opening bigdata file")
+	}
+
+	if _, err := io.Copy(writer, data); err != nil {
+		writer.Close()
+		return errors.Wrapf(err, "error copying bigdata for the layer")
+	}
+	if err := writer.Close(); err != nil {
+		return errors.Wrapf(err, "error closing bigdata file for the layer")
+	}
+
+	addName := true
+	for _, name := range layer.BigDataNames {
+		if name == key {
+			addName = false
+			break
+		}
+	}
+	if addName {
+		layer.BigDataNames = append(layer.BigDataNames, key)
+		return r.Save()
+	}
+	return nil
+}
+
+func (r *layerStore) BigDataNames(id string) ([]string, error) {
+	layer, ok := r.lookup(id)
+	if !ok {
+		return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to retrieve bigdata names", id)
+	}
+	return copyStringSlice(layer.BigDataNames), nil
+}
+
+func (r *layerStore) Metadata(id string) (string, error) {
+	if layer, ok := r.lookup(id); ok {
+		return layer.Metadata, nil
+	}
+	return "", ErrLayerUnknown
+}
+
+func (r *layerStore) SetMetadata(id, metadata string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
+	}
+	if layer, ok := r.lookup(id); ok {
+		layer.Metadata = metadata
+		return r.Save()
+	}
+	return ErrLayerUnknown
+}
+
+func (r *layerStore) tspath(id string) string {
+	return filepath.Join(r.layerdir, id+tarSplitSuffix)
+}
+
+func (r *layerStore) deleteInternal(id string) error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return ErrLayerUnknown
+	}
+	id = layer.ID
+	err := r.driver.Remove(id)
+	if err != nil {
+		return err
+	}
+
+	os.Remove(r.tspath(id))
+	os.RemoveAll(r.datadir(id))
+	delete(r.byid, id)
+	for _, name := range layer.Names {
+		delete(r.byname, name)
+	}
+	r.idindex.Delete(id)
+	mountLabel := layer.MountLabel
+	if layer.MountPoint != "" {
+		delete(r.bymount, layer.MountPoint)
+	}
+	r.deleteInDigestMap(id)
+	toDeleteIndex := -1
+	for i, candidate := range r.layers {
+		if candidate.ID == id {
+			toDeleteIndex = i
+			break
+		}
+	}
+	if toDeleteIndex != -1 {
+		// delete the layer at toDeleteIndex
+		if toDeleteIndex == len(r.layers)-1 {
+			r.layers = r.layers[:len(r.layers)-1]
+		} else {
+			r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
+		}
+	}
+	if mountLabel != "" {
+		var found bool
+		for _, candidate := range r.layers {
+			if candidate.MountLabel == mountLabel {
+				found = true
+				break
+			}
+		}
+		if !found {
+			label.ReleaseLabel(mountLabel)
+		}
+	}
+
+	return nil
+}
+
+func (r *layerStore) deleteInDigestMap(id string) {
+	for digest, layers := range r.bycompressedsum {
+		for i, layerID := range layers {
+			if layerID == id {
+				layers = append(layers[:i], layers[i+1:]...)
+ r.bycompressedsum[digest] = layers + break + } + } + } + for digest, layers := range r.byuncompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) + r.byuncompressedsum[digest] = layers + break + } + } + } +} + +func (r *layerStore) Delete(id string) error { + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + id = layer.ID + // The layer may already have been explicitly unmounted, but if not, we + // should try to clean that up before we start deleting anything at the + // driver level. + mountCount, err := r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + for mountCount > 0 { + if _, err := r.Unmount(id, false); err != nil { + return err + } + mountCount, err = r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + } + if err := r.deleteInternal(id); err != nil { + return err + } + return r.Save() +} + +func (r *layerStore) Lookup(name string) (id string, err error) { + if layer, ok := r.lookup(name); ok { + return layer.ID, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *layerStore) Get(id string) (*Layer, error) { + if layer, ok := r.lookup(id); ok { + return copyLayer(layer), nil + } + return nil, ErrLayerUnknown +} + +func (r *layerStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) { + var ok bool + toLayer, ok = r.lookup(to) + if !ok { + return "", "", nil, nil, ErrLayerUnknown + } + to = toLayer.ID + if from == "" { + from = toLayer.Parent + } + if from != "" { + fromLayer, ok = r.lookup(from) + if ok { + from = fromLayer.ID + } else { + fromLayer, ok = r.lookup(toLayer.Parent) + if ok { + from = fromLayer.ID + } + } + } + return from, to, fromLayer, toLayer, nil +} + +func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings { + if layer == nil { + return &idtools.IDMappings{} + } + return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) +} + +func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) + if err != nil { + return nil, ErrLayerUnknown + } + return r.driver.Changes(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) +} + +type simpleGetCloser struct { + r *layerStore + path string + id string +} + +func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { + return os.Open(filepath.Join(s.path, path)) +} + +func (s *simpleGetCloser) Close() error { + _, err := s.r.Unmount(s.id, false) + return err +} + +func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { + if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { + return getter.DiffGetter(id) + } + path, err := r.Mount(id, drivers.MountOpts{}) + if err != nil { + return nil, err + } + return &simpleGetCloser{ + r: r, + path: path, + id: id, + }, nil +} + +func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, 
error) { + var metadata storage.Unpacker + + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) + if err != nil { + return nil, ErrLayerUnknown + } + // Default to applying the type of compression that we noted was used + // for the layerdiff when it was applied. + compression := toLayer.CompressionType + // If a particular compression type (or no compression) was selected, + // use that instead. + if options != nil && options.Compression != nil { + compression = *options.Compression + } + maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) { + // Depending on whether or not compression is desired, return either the + // passed-in ReadCloser, or a new one that provides its readers with a + // compressed version of the data that the original would have provided + // to its readers. + if compression == archive.Uncompressed { + return rc, nil + } + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + rc.Close() + pwriter.Close() + preader.Close() + return nil, err + } + go func() { + defer pwriter.Close() + defer compressor.Close() + defer rc.Close() + io.Copy(compressor, rc) + }() + return preader, nil + } + + if from != toLayer.Parent { + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) + } + + if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok { + if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil { + // This is an additional layer. We leverage blob API for acquiring the reproduced raw blob. + info, err := aLayer.Info() + if err != nil { + aLayer.Release() + return nil, err + } + defer info.Close() + layer := &Layer{} + if err := json.NewDecoder(info).Decode(layer); err != nil { + aLayer.Release() + return nil, err + } + blob, err := aLayer.Blob() + if err != nil { + aLayer.Release() + return nil, err + } + // If layer compression type is different from the expected one, decompress and convert it. 
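+	// Aside: maybeCompressReadCloser above uses the classic io.Pipe idiom —
+	// a goroutine copies the source through a compressor into the write end
+	// while the read end is handed back to the caller. A hedged, stdlib-only
+	// reduction (compress/gzip in place of pgzip; not part of the vendored file):
+	//
+	//	func gzipReadCloser(rc io.ReadCloser) io.ReadCloser {
+	//		pr, pw := io.Pipe()
+	//		go func() {
+	//			gz := gzip.NewWriter(pw)
+	//			_, err := io.Copy(gz, rc)
+	//			gz.Close()
+	//			rc.Close()
+	//			pw.CloseWithError(err) // readers see err, or io.EOF when nil
+	//		}()
+	//		return pr
+	//	}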
+ if compression != layer.CompressionType { + diff, err := archive.DecompressStream(blob) + if err != nil { + if err2 := blob.Close(); err2 != nil { + err = errors.Wrapf(err, "failed to close blob file: %v", err2) + } + aLayer.Release() + return nil, err + } + rc, err := maybeCompressReadCloser(diff) + if err != nil { + if err2 := closeAll(blob.Close, diff.Close); err2 != nil { + err = errors.Wrapf(err, "failed to cleanup: %v", err2) + } + aLayer.Release() + return nil, err + } + return ioutils.NewReadCloserWrapper(rc, func() error { + defer aLayer.Release() + return closeAll(blob.Close, rc.Close) + }), nil + } + return ioutils.NewReadCloserWrapper(blob, func() error { defer aLayer.Release(); return blob.Close() }), nil + } + } + + tsfile, err := os.Open(r.tspath(to)) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) + } + + decompressor, err := pgzip.NewReader(tsfile) + if err != nil { + if e := tsfile.Close(); e != nil { + logrus.Debug(e) + } + return nil, err + } + + metadata = storage.NewJSONUnpacker(decompressor) + + fgetter, err := r.newFileGetter(to) + if err != nil { + errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter")) + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + return nil, errs.ErrorOrNil() + } + + tarstream := asm.NewOutputTarStream(fgetter, metadata) + rc := ioutils.NewReadCloserWrapper(tarstream, func() error { + var errs *multierror.Error + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + if err := tarstream.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream")) + } + if err := fgetter.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter")) + } + if errs != nil { + return errs.ErrorOrNil() + } + return nil + }) + return maybeCompressReadCloser(rc) +} + +func (r *layerStore) DiffSize(from, to string) (size int64, err error) { + var fromLayer, toLayer *Layer + from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to) + if err != nil { + return -1, ErrLayerUnknown + } + return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) +} + +func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { + return r.applyDiffWithOptions(to, nil, diff) +} + +func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) { + if !r.IsReadWrite() { + return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) + } + + layer, ok := r.lookup(to) + if !ok { + return -1, ErrLayerUnknown + } + + header := make([]byte, 10240) + n, err := diff.Read(header) + if err != nil && err != io.EOF { + return -1, err + } + compression := archive.DetectCompression(header[:n]) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + + // Decide if we need to compute digests + var 
compressedDigest, uncompressedDigest digest.Digest // = "" + var compressedDigester, uncompressedDigester digest.Digester // = nil + if layerOptions != nil && layerOptions.OriginalDigest != "" && + layerOptions.OriginalDigest.Algorithm() == digest.Canonical { + compressedDigest = layerOptions.OriginalDigest + } else { + compressedDigester = digest.Canonical.Digester() + } + if layerOptions != nil && layerOptions.UncompressedDigest != "" && + layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { + uncompressedDigest = layerOptions.UncompressedDigest + } else { + uncompressedDigester = digest.Canonical.Digester() + } + + var compressedWriter io.Writer + if compressedDigester != nil { + compressedWriter = compressedDigester.Hash() + } else { + compressedWriter = ioutil.Discard + } + compressedCounter := ioutils.NewWriteCounter(compressedWriter) + defragmented = io.TeeReader(defragmented, compressedCounter) + + tsdata := bytes.Buffer{} + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + compressor = pgzip.NewWriter(&tsdata) + } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("Error setting compression concurrency threads to 1: %v; ignoring", err) + } + metadata := storage.NewJSONPacker(compressor) + uncompressed, err := archive.DecompressStream(defragmented) + if err != nil { + return -1, err + } + defer uncompressed.Close() + uidLog := make(map[uint32]struct{}) + gidLog := make(map[uint32]struct{}) + idLogger, err := tarlog.NewLogger(func(h *tar.Header) { + if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) { + uidLog[uint32(h.Uid)] = struct{}{} + gidLog[uint32(h.Gid)] = struct{}{} + } + }) + if err != nil { + return -1, err + } + defer idLogger.Close() + uncompressedCounter := ioutils.NewWriteCounter(idLogger) + uncompressedWriter := (io.Writer)(uncompressedCounter) + if uncompressedDigester != nil { + uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) + } + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, err + } + options := drivers.ApplyDiffOpts{ + Diff: payload, + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options) + if err != nil { + return -1, err + } + compressor.Close() + if err == nil { + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + return -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + return -1, err + } + } + if compressedDigester != nil { + compressedDigest = compressedDigester.Digest() + } + if uncompressedDigester != nil { + uncompressedDigest = uncompressedDigester.Digest() + } + + updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } + } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) + layer.CompressedDigest = compressedDigest + layer.CompressedSize = compressedCounter.Count + 
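+	// Aside: the TeeReader/WriteCounter plumbing above computes sizes and
+	// digests in the same pass that applies the diff. The core idiom, as a
+	// hedged, self-contained sketch (github.com/opencontainers/go-digest):
+	//
+	//	func digestAndCount(r io.Reader) (digest.Digest, int64, error) {
+	//		digester := digest.Canonical.Digester()
+	//		n, err := io.Copy(digester.Hash(), r) // Hash() is an io.Writer
+	//		if err != nil {
+	//			return "", -1, err
+	//		}
+	//		return digester.Digest(), n, nil
+	//	}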
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) + layer.UncompressedDigest = uncompressedDigest + layer.UncompressedSize = uncompressedCounter.Count + layer.CompressionType = compression + layer.UIDs = make([]uint32, 0, len(uidLog)) + for uid := range uidLog { + layer.UIDs = append(layer.UIDs, uid) + } + sort.Slice(layer.UIDs, func(i, j int) bool { + return layer.UIDs[i] < layer.UIDs[j] + }) + layer.GIDs = make([]uint32, 0, len(gidLog)) + for gid := range gidLog { + layer.GIDs = append(layer.GIDs, gid) + } + sort.Slice(layer.GIDs, func(i, j int) bool { + return layer.GIDs[i] < layer.GIDs[j] + }) + + err = r.Save() + + return size, err +} + +func (r *layerStore) DifferTarget(id string) (string, error) { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return "", ErrNotSupported + } + layer, ok := r.lookup(id) + if !ok { + return "", ErrLayerUnknown + } + return ddriver.DifferTarget(layer.ID) +} + +func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return ErrNotSupported + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + if options == nil { + options = &drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + } + err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options) + if err != nil { + return err + } + layer.UIDs = diffOutput.UIDs + layer.GIDs = diffOutput.GIDs + layer.UncompressedDigest = diffOutput.UncompressedDigest + layer.UncompressedSize = diffOutput.Size + layer.Metadata = diffOutput.Metadata + if err = r.Save(); err != nil { + return err + } + for k, v := range diffOutput.BigData { + if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { + r.Delete(id) + return err + } + } + return err +} + +func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return nil, ErrNotSupported + } + + if to == "" { + output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ) + return &output, err + } + + layer, ok := r.lookup(to) + if !ok { + return nil, ErrLayerUnknown + } + if options == nil { + options = &drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + } + output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) + if err != nil { + return nil, err + } + layer.UIDs = output.UIDs + layer.GIDs = output.GIDs + err = r.Save() + return &output, err +} + +func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return ErrNotSupported + } + return ddriver.CleanupStagingDirectory(stagingDirectory) +} + +func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { + var layers []Layer + for _, layerID := range m[d] { + layer, ok := r.lookup(layerID) + if !ok { + return nil, ErrLayerUnknown + } + layers = append(layers, *copyLayer(layer)) + } + return layers, nil +} + +func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.bycompressedsum, d) +} + +func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, 
error) { + return r.layersByDigestMap(r.byuncompressedsum, d) +} + +func (r *layerStore) Lock() { + r.lockfile.Lock() +} + +func (r *layerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *layerStore) RLock() { + r.lockfile.RLock() +} + +func (r *layerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *layerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *layerStore) Modified() (bool, error) { + var mmodified, tmodified bool + lmodified, err := r.lockfile.Modified() + if err != nil { + return lmodified, err + } + if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + mmodified, err = r.mountsLockfile.Modified() + if err != nil { + return lmodified, err + } + } + + if lmodified || mmodified { + return true, nil + } + + // If the layers.json file has been modified manually, then we have to + // reload the storage in any case. + info, err := os.Stat(r.layerspath()) + if err != nil && !os.IsNotExist(err) { + return false, errors.Wrap(err, "stat layers file") + } + if info != nil { + tmodified = info.ModTime() != r.layerspathModified + r.layerspathModified = info.ModTime() + } + + return tmodified, nil +} + +func (r *layerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *layerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *layerStore) Locked() bool { + return r.lockfile.Locked() +} + +func (r *layerStore) ReloadIfChanged() error { + r.loadMut.Lock() + defer r.loadMut.Unlock() + + modified, err := r.Modified() + if err == nil && modified { + return r.Load() + } + return err +} + +func closeAll(closes ...func() error) (rErr error) { + for _, f := range closes { + if err := f(); err != nil { + if rErr == nil { + rErr = errors.Wrapf(err, "close error") + continue + } + rErr = errors.Wrapf(rErr, "%v", err) + } + } + return +} diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go new file mode 100644 index 00000000000..6fac2ebac63 --- /dev/null +++ b/vendor/github.com/containers/storage/lockfile_compat.go @@ -0,0 +1,15 @@ +package storage + +import ( + "github.com/containers/storage/pkg/lockfile" +) + +type Locker = lockfile.Locker + +func GetLockfile(path string) (lockfile.Locker, error) { + return lockfile.GetLockfile(path) +} + +func GetROLockfile(path string) (lockfile.Locker, error) { + return lockfile.GetROLockfile(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md new file mode 100644 index 00000000000..7307d9694f6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
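The lockfile_compat.go shim above keeps the old storage.GetLockfile entry points working by forwarding to the newer pkg/lockfile package. A hedged usage sketch (path hypothetical; error handling elided):

	lock, err := storage.GetLockfile(filepath.Join(storeDir, "layers.lock"))
	if err != nil {
		return err
	}
	lock.Lock() // exclusive; a locker from GetROLockfile is used with RLock instead
	defer lock.Unlock()
	// read or modify the store's on-disk state while the lock is held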
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
new file mode 100644
index 00000000000..677a15edd29
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -0,0 +1,1520 @@
+package archive
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/promise"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	gzip "github.com/klauspost/pgzip"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/ulikunitz/xz"
+)
+
+type (
+	// Compression represents the compression state, i.e. whether and how
+	// the content is compressed.
+	Compression int
+	// WhiteoutFormat is the format of unpacked whiteouts
+	WhiteoutFormat int
+
+	// TarOptions wraps the tar options.
+	TarOptions struct {
+		IncludeFiles      []string
+		ExcludePatterns   []string
+		Compression       Compression
+		NoLchown          bool
+		UIDMaps           []idtools.IDMap
+		GIDMaps           []idtools.IDMap
+		IgnoreChownErrors bool
+		ChownOpts         *idtools.IDPair
+		IncludeSourceDir  bool
+		// WhiteoutFormat is the expected on disk format for whiteout files.
+		// This format will be converted to the standard format on pack
+		// and from the standard format on unpack.
+		WhiteoutFormat WhiteoutFormat
+		// This is additional data to be used by the converter. It will
+		// not survive a round trip through JSON, so it's primarily
+		// intended for generating archives (i.e., converting writes).
+		WhiteoutData interface{}
+		// When unpacking, specifies whether overwriting a directory with a
+		// non-directory is allowed and vice versa.
+		NoOverwriteDirNonDir bool
+		// For each include when creating an archive, the included name will be
+		// replaced with the matching name from this map.
+		RebaseNames map[string]string
+		InUserNS    bool
+		// CopyPass indicates that the contents of any archive we're creating
+		// will instantly be extracted and written to disk, so we can deviate
+		// from the traditional behavior/format to get features like subsecond
+		// precision in timestamps.
+		CopyPass bool
+		// ForceMask, if set, indicates the permission mask used for created files.
+		ForceMask *os.FileMode
+	}
+)
+
+const (
+	tarExt                  = "tar"
+	solaris                 = "solaris"
+	windows                 = "windows"
+	containersOverrideXattr = "user.containers.override_stat"
+)
+
+var xattrsToIgnore = map[string]interface{}{
+	"security.selinux": true,
+}
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. To facilitate the passing of specific id mappings
+// for untar, an archiver can be created with maps which will then be passed to
+// Untar operations. If ChownOpts is set, its values are mapped using
+// UntarIDMappings before being used to create files and directories on disk.
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +// overwriteError is used to differentiate errors related to attempting to +// overwrite a directory with a non-directory or vice-versa. When testing +// copying a file over a directory, this error is expected in order for the +// test to pass. +type overwriteError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xz.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + case Zstd: + return zstdReader(buf) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Zstd: + return zstdWriter(dest) + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
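+// A hedged usage sketch for the wrapper defined below (entry name
+// hypothetical; assumes "etc/motd" already exists in the input stream):
+//
+//	motd := func(path string, hdr *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
+//		return hdr, []byte("maintenance tonight\n"), nil // the wrapper fixes up hdr.Size
+//	}
+//	rc := ReplaceFileTarWrapper(in, map[string]TarModifierFunc{"etc/motd": motd})
+//	defer rc.Close()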
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return tarExt + case Bzip2: + return tarExt + ".bz2" + case Gzip: + return tarExt + ".gz" + case Xz: + return tarExt + ".xz" + case Zstd: + return tarExt + ".zst" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
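+// For instance, with the standard library alone a directory's type bit is
+// carried only in Typeflag (a hedged illustration; the exact mode varies):
+//
+//	fi, _ := os.Stat("/usr")                 // assume a plain directory
+//	hdr, _ := tar.FileInfoHeader(fi, "")
+//	fmt.Printf("%o\n", hdr.Mode)             // e.g. 755: permission bits only
+//	fmt.Println(hdr.Typeflag == tar.TypeDir) // true
+//
+// fillGo18FileTypeBits below folds modeISDIR back into hdr.Mode.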
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+	hdr, err := tar.FileInfoHeader(fi, link)
+	if err != nil {
+		return nil, err
+	}
+	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+	name, err = canonicalTarName(name, fi.IsDir())
+	if err != nil {
+		return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+	}
+	hdr.Name = name
+	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+		return nil, err
+	}
+	return hdr, nil
+}
+
+// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+	fm := fi.Mode()
+	switch {
+	case fm.IsRegular():
+		mode |= modeISREG
+	case fi.IsDir():
+		mode |= modeISDIR
+	case fm&os.ModeSymlink != 0:
+		mode |= modeISLNK
+	case fm&os.ModeDevice != 0:
+		if fm&os.ModeCharDevice != 0 {
+			mode |= modeISCHR
+		} else {
+			mode |= modeISBLK
+		}
+	case fm&os.ModeNamedPipe != 0:
+		mode |= modeISFIFO
+	case fm&os.ModeSocket != 0:
+		mode |= modeISSOCK
+	}
+	return mode
+}
+
+// ReadSecurityXattrToTarHeader reads the security.capability and security.ima
+// xattrs from the filesystem to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+	if hdr.Xattrs == nil {
+		hdr.Xattrs = make(map[string]string)
+	}
+	for _, xattr := range []string{"security.capability", "security.ima"} {
+		capability, err := system.Lgetxattr(path, xattr)
+		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+			return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
+		}
+		if capability != nil {
+			hdr.Xattrs[xattr] = string(capability)
+		}
+	}
+	return nil
+}
+
+// ReadUserXattrToTarHeader reads user.* xattrs from the filesystem to a tar header
+func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
+	xattrs, err := system.Llistxattr(path)
+	if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+		return err
+	}
+	for _, key := range xattrs {
+		if strings.HasPrefix(key, "user.") {
+			value, err := system.Lgetxattr(path, key)
+			if err != nil {
+				if errors.Is(err, system.E2BIG) {
+					logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key)
+					continue
+				}
+				return err
+			}
+			if hdr.Xattrs == nil {
+				hdr.Xattrs = make(map[string]string)
+			}
+			hdr.Xattrs[key] = string(value)
+		}
+	}
+	return nil
+}
+
+type TarWhiteoutHandler interface {
+	Setxattr(path, name string, value []byte) error
+	Mknod(path string, mode uint32, dev int) error
+	Chown(path string, uid, gid int) error
+}
+
+type TarWhiteoutConverter interface {
+	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+	ConvertRead(*tar.Header, string) (bool, error)
+	ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
+}
+
+type tarAppender struct {
+	TarWriter *tar.Writer
+	Buffer    *bufio.Writer
+
+	// for hardlink mapping
+	SeenFiles  map[uint64]string
+	IDMappings *idtools.IDMappings
+	ChownOpts  *idtools.IDPair
+
+	// For packing and unpacking whiteout files in the
+	// non-standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+	WhiteoutConverter TarWhiteoutConverter
+	// CopyPass indicates that the contents of any archive we're creating
+	// will instantly be extracted and written to disk, so we can deviate
+	// from the traditional behavior/format to get features like subsecond
+	// precision in timestamps.
+	CopyPass bool
+}
+
+func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
+	return &tarAppender{
+		SeenFiles:  make(map[uint64]string),
+		TarWriter:  tar.NewWriter(writer),
+		Buffer:     pools.BufioWriter32KPool.Get(nil),
+		IDMappings: idMapping,
+		ChownOpts:  chownOpts,
+	}
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+	name, err := CanonicalTarNameForPath(name)
+	if err != nil {
+		return "", err
+	}
+
+	// suffix with '/' for directories
+	if isDir && !strings.HasSuffix(name, "/") {
+		name += "/"
+	}
+	return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+
+	var link string
+	if fi.Mode()&os.ModeSymlink != 0 {
+		var err error
+		link, err = os.Readlink(path)
+		if err != nil {
+			return err
+		}
+	}
+	if fi.Mode()&os.ModeSocket != 0 {
+		logrus.Warnf("archive: skipping %q since it is a socket", path)
+		return nil
+	}
+
+	hdr, err := FileInfoHeader(name, fi, link)
+	if err != nil {
+		return err
+	}
+	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+		return err
+	}
+	if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
+		return err
+	}
+	if ta.CopyPass {
+		copyPassHeader(hdr)
+	}
+
+	// if it's not a directory and has more than 1 link,
+	// it's hard linked, so set the type flag accordingly
+	if !fi.IsDir() && hasHardlinks(fi) {
+		inode, err := getInodeFromStat(fi.Sys())
+		if err != nil {
+			return err
+		}
+		// a link should have a name that it links to,
+		// and that linked name should be first in the tar archive
+		if oldpath, ok := ta.SeenFiles[inode]; ok {
+			hdr.Typeflag = tar.TypeLink
+			hdr.Linkname = oldpath
+			hdr.Size = 0 // This Must be here for the writer math to add up!
+		} else {
+			ta.SeenFiles[inode] = name
+		}
+	}
+
+	// handle re-mapping container ID mappings back to host ID mappings before
+	// writing tar headers/files. We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host
+	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
+		fileIDPair, err := getFileUIDGID(fi.Sys())
+		if err != nil {
+			return err
+		}
+		hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+		if err != nil {
+			return err
+		}
+	}
+
+	// explicitly override with ChownOpts
+	if ta.ChownOpts != nil {
+		hdr.Uid = ta.ChownOpts.UID
+		hdr.Gid = ta.ChownOpts.GID
+	}
+
+	maybeTruncateHeaderModTime(hdr)
+
+	if ta.WhiteoutConverter != nil {
+		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+		if err != nil {
+			return err
+		}
+
+		// If a new whiteout file exists, write the original hdr, then
+		// replace hdr with wo to be written after. Whiteouts should
+		// always be written after the original. Note that the original
+		// hdr may itself have been converted to a whiteout when the
+		// converter returns a whiteout header.
+		if wo != nil {
+			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+			}
+			hdr = wo
+		}
+	}
+
+	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+		file, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+
+		ta.Buffer.Reset(ta.TarWriter)
+		defer ta.Buffer.Reset(nil)
+		_, err = io.Copy(ta.Buffer, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+		err = ta.Buffer.Flush()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
+	// hdr.Mode is in linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	mask := hdrInfo.Mode()
+	if forceMask != nil {
+		mask = *forceMask
+	}
+
+	switch hdr.Typeflag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+		// In that case we just want to merge the two
+		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+			if err := os.Mkdir(path, mask); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg, tar.TypeRegA:
+		// Source is a regular file. We use system.OpenFileSequential to use sequential
+		// file access to avoid depleting the standby list on Windows.
+		// On Linux, this equates to a regular os.OpenFile
+		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask)
+		if err != nil {
+			return err
+		}
+		if _, err := io.CopyBuffer(file, reader, buffer); err != nil {
+			file.Close()
+			return err
+		}
+		file.Close()
+
+	case tar.TypeBlock, tar.TypeChar:
+		if inUserns { // cannot create devices in a userns
+			logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
+			return nil
+		}
+		fallthrough
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
+		// check for hardlink breakout
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+		}
+		if err := os.Link(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		// path -> hdr.Linkname = targetPath
+		// e.g.
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + if forceMask != nil && hdr.Typeflag != tar.TypeSymlink { + value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) + if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != windows { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID) + if err != nil { + if ignoreChownErrors { + fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err) + } else { + return err + } + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + var errs []string + for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. We also ignore EPERM errors if we are running in a + // user namespace. + errs = append(errs, err.Error()) + continue + } + return err + } + + } + + if len(errs) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errs, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. 
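+// A hedged usage sketch (paths hypothetical; error handling elided):
+//
+//	rc, _ := Tar("/var/lib/mydata", Gzip)
+//	defer rc.Close()
+//	out, _ := os.Create("/tmp/mydata.tar.gz")
+//	defer out.Close()
+//	io.Copy(out, rc)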
+func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. 
In other words, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + matches, err := pm.IsMatch(relFilePath) + if err != nil { + logrus.Errorf("Matching %s: %v", relFilePath, err) + return err + } + skip = matches + } + + if skip { + // If we want to skip this file and it's a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // It's not a dir, so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + buffer := make([]byte, 1<<20) + + doChown := !options.NoLchown + if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false + uid, gid, mode, err := GetFileOwner(dest) + if err == nil { + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exists we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + chownOpts := options.ChownOpts + if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if chownOpts != nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them from modifying the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream.
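The unpack entry points pair with the tar side above; a short sketch with hypothetical paths (Untar sniffs and undoes compression, UntarUncompressed below skips the sniffing):

package main

import (
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Compression is detected from the stream's magic bytes.
	if err := archive.Untar(f, "/tmp/extracted", &archive.TarOptions{}); err != nil {
		panic(err)
	}
}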
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarMappings := archiver.TarIDMappings + if tarMappings == nil { + tarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + Compression: Uncompressed, + CopyPass: true, + InUserNS: userns.RunningInUserNS(), + } + archive, err := TarWithOptions(src, options) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options = &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: userns.RunningInUserNS(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untars a file from path to a destination; src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: userns.RunningInUserNS(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.UntarIDMappings.RootPair() + if archiver.ChownOpts != nil { + rootIDs = *archiver.ChownOpts + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata.
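A sketch of driving the Archiver copy helpers above and below (NewArchiver appears further down; identity ID mappings, hypothetical paths):

import "github.com/containers/storage/pkg/archive"

func copyTree() error {
	// NewArchiver(nil) uses identity mappings; pass an *idtools.IDMappings
	// to remap ownership between the tar and untar sides.
	arch := archive.NewArchiver(nil)
	if err := arch.CopyWithTar("/tmp/src-dir", "/tmp/dst-dir"); err != nil {
		return err
	}
	// Single regular file, metadata preserved.
	return arch.CopyFileWithTar("/tmp/a.txt", "/tmp/dst-dir/a.txt")
}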
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + copyPassHeader(hdr) + + if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + options := &TarOptions{ + UIDMaps: archiver.UntarIDMappings.UIDs(), + GIDMaps: archiver.UntarIDMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: userns.RunningInUserNS(), + NoOverwriteDirNonDir: true, + } + err = archiver.Untar(r, filepath.Dir(dst), options) + if err != nil { + r.CloseWithError(err) + } + return err +} + +func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) { + var uid, gid int + if chownOpts != nil { + uid, gid = chownOpts.UID, chownOpts.GID + } else { + if readIDMappings != nil && !readIDMappings.Empty() { + uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + if err != nil { + return err + } + } else { + uid, gid = hdr.Uid, hdr.Gid + } + } + ids := idtools.IDPair{UID: uid, GID: gid} + if writeIDMappings != nil && !writeIDMappings.Empty() { + ids, err = writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
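The self-deleting read behavior of TempArchive (defined just below) is easy to trip over; a sketch that reads exactly Size bytes so the final Read triggers the cleanup:

import (
	"bytes"
	"io"

	"github.com/containers/storage/pkg/archive"
)

func roundTrip() error {
	ta, err := archive.NewTempArchive(bytes.NewReader([]byte("payload")), "")
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	// The Read that reaches ta.Size closes and deletes the backing
	// temp file, so don't read past it.
	if _, err := io.CopyN(&buf, ta, ta.Size); err != nil {
		return err
	}
	return ta.Close() // safe no-op after the implicit close
}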
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + } + hashWorker.Wait() + if err == nil { + err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, 
possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// GetOverlayXattrName returns the xattr used by the overlay driver with the +// given name. +// It uses the trusted.overlay prefix when running as root, and user.overlay +// in rootless mode. 
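A sketch of the helper constructors above with a hypothetical 64K subordinate ID range (idtools.IDMap fields as defined in this repository's idtools package):

import (
	"io"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/idtools"
)

func tarContainerPath() (io.ReadCloser, error) {
	uidmap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidmap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	// Files owned by host UID 100000 appear as UID 0 in the archive.
	tarFn := archive.TarPath(uidmap, gidmap)
	return tarFn("/var/lib/mycontainer/rootfs")
}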
+func GetOverlayXattrName(name string) string { + if unshare.IsRootless() { + return fmt.Sprintf("user.overlay.%s", name) + } + return fmt.Sprintf("trusted.overlay.%s", name) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 00000000000..7bc44a5665e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,22 @@ +// +build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 00000000000..d19811fdbca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,13 @@ +// +build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go new file mode 100644 index 00000000000..7c307ffcfe5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go @@ -0,0 +1,125 @@ +// +build freebsd + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/userns" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
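The build-tagged copyPassHeader above (archive_110.go) pins headers to the PAX format so sub-second mtimes and long names survive the round trip; its effect is equivalent to this sketch:

import (
	"archive/tar"
	"os"
)

func paxHeader(path string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return nil, err
	}
	// On go1.10+ copyPassHeader does exactly this; with FormatUnknown,
	// maybeTruncateHeaderModTime instead truncates ModTime to whole
	// seconds up front so later change detection sees stable times.
	hdr.Format = tar.FormatPAX
	return hdr, nil
}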
+func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if userns.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 00000000000..2f548b661ce --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,191 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getOverlayOpaqueXattrName() string { + return GetOverlayXattrName("opaque") +} + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just 
rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, getOverlayOpaqueXattrName()) + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, getOverlayOpaqueXattrName()) + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return nil, nil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. + if isWhiteOut(stat) { + return nil, nil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := handler.Setxattr(dir, getOverlayOpaqueXattrName(), []byte{'y'}) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + // If someone does: + // rm -rf /foo/bar + // in an image, some tools will generate a layer with: + // /.wh.foo + // /foo/.wh.bar + // and when doing the second mknod(), we will fail with + // ENOTDIR, since the previous /foo was mknod()'d as a + // character device node and not a directory. 
+ if isENOTDIR(err) { + return false, nil + } + return false, err + } + if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +type directHandler struct { +} + +func (d directHandler) Setxattr(path, name string, value []byte) error { + return unix.Setxattr(path, name, value, 0) +} + +func (d directHandler) Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func (d directHandler) Chown(path string, uid, gid int) error { + return idtools.SafeChown(path, uid, gid) +} + +func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + var handler directHandler + return o.ConvertReadWithHandler(hdr, path, handler) +} + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + f, err := os.Stat(path) + if err != nil { + return 0, 0, 0, err + } + s, ok := f.Sys().(*syscall.Stat_t) + if ok { + return s.Uid, s.Gid, s.Mode & 07777, nil + } + return 0, 0, uint32(f.Mode()), nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 00000000000..4b8834444f0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,11 @@ +// +build !linux + +package archive + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + return nil +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 00000000000..7c3e442dad9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,120 @@ +// +build !windows,!freebsd + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
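A sketch of how a caller opts into the overlay whiteout conversion implemented in archive_linux.go above (hypothetical paths; WhiteoutData carries the read-only lower-layer paths consulted by ConvertWrite):

import (
	"io"

	"github.com/containers/storage/pkg/archive"
)

func applyOverlayLayer(layer io.Reader) error {
	opts := &archive.TarOptions{
		// ".wh.foo" entries in the stream become 0:0 character devices
		// on disk, and ".wh..wh..opq" marks a directory opaque via the
		// overlay xattr.
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
		WhiteoutData:   []string{"/var/lib/storage/lower1"},
	}
	return archive.Untar(layer, "/var/lib/storage/upper", opts)
}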
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 00000000000..a0872444f32 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,79 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go new file mode 100644 index 00000000000..36b7118aa85 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go @@ -0,0 +1,41 @@ +package archive + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go new file mode 100644 index 00000000000..c7bb25d0f17 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -0,0 +1,500 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. 
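The zstd wrappers in archive_zstd.go above back the Zstd member of this package's Compression enum; a round-trip sketch through the generic stream helpers (assuming zstd magic bytes are recognized by DecompressStream):

import (
	"bytes"
	"io/ioutil"

	"github.com/containers/storage/pkg/archive"
)

func zstdRoundTrip(payload []byte) ([]byte, error) {
	var buf bytes.Buffer
	w, err := archive.CompressStream(&buf, archive.Zstd)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	r, err := archive.DecompressStream(&buf)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}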
+type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change; it wraps the change type and path. +// It describes changes of the files in the path with respect to the +// parent layers. The change could be a modify, an add, or a delete. +// This is used for layer diffs. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// GNU tar and the Go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files; we handle this by comparing for exact times, *or* the same +// second count with either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +func aufsWhiteoutPresent(root, path string) (bool, error) { + f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) + _, err := os.Stat(filepath.Join(root, f)) + if err == nil { + return true, nil + } + if os.IsNotExist(err) || isENOTDIR(err) { + return false, nil + } + return false, err +} + +func isENOTDIR(err error) bool { + if err == nil { + return false + } + if err == syscall.ENOTDIR { + return true + } + if perror, ok := err.(*os.PathError); ok { + if errno, ok := perror.Err.(syscall.Errno); ok { + return errno == syscall.ENOTDIR + } + } + return false +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) +type whiteoutChange func(string, string) (bool, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the
daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool + xattrs map[string]string +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) || + !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. 
+ if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed to close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go new file mode 100644 index 00000000000..a3addebe690 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -0,0 +1,401 @@ +package archive + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save whole seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo + idmap1 *idtools.IDMappings + idmap2 *idtools.IDMappings +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// for generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(idmap1), + root2: newRootFileInfo(idmap2), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree.
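A sketch tying together the changes API above: diff two extracted layer directories and re-export the delta as a tar stream (hypothetical paths, empty ID mappings):

import (
	"io"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/idtools"
)

func layerDelta(oldDir, newDir string) (io.ReadCloser, error) {
	empty := idtools.NewIDMappingsFromMaps(nil, nil)
	changes, err := archive.ChangesDirs(newDir, empty, oldDir, empty)
	if err != nil {
		return nil, err
	}
	// Deletions become ".wh." whiteout entries in the exported stream.
	return archive.ExportChanges(newDir, changes, nil, nil)
}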
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + return err + } + xattrs, err := system.Llistxattr(cpath) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(cpath, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key) + continue + } + return err + } + if info.xattrs == nil { + info.xattrs = make(map[string]string) + } + info.xattrs[key] = string(value) + } + } + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
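+	// For example, names1=["a","c"] and names2=["b","c"] merge to ["a","b","c"],
+	// with "c" emitted only when its inode or device differs between the trees.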
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + builder := make([]byte, 0, dirent.Reclen) + for i := 0; i < len(dirent.Name); i++ { + if dirent.Name[i] == 0 { + break + } + builder = append(builder, byte(dirent.Name[i])) + } + name := string(builder) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + return overlayDeletedFile(layers, root, path, fi) + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(fi) { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) + if err != nil { + return "", err + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. 
+ return "", nil + +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 00000000000..bbbd8c9de87 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,99 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go new file mode 100644 index 00000000000..1cc1910f897 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -0,0 +1,50 @@ +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + oldUID, oldGID := oldStat.UID(), oldStat.GID() + uid, gid := newStat.UID(), newStat.GID() + if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil { + uid = uint32(cuid) + gid = uint32(cgid) + if oldInfo != nil { + if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil { + oldUID = uint32(oldcuid) + oldGID = uint32(oldcgid) + } + } + } + ownerChanged := uid != oldUID || gid != oldGID + if oldStat.Mode() != newStat.Mode() || + ownerChanged || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go new file mode 100644 index 00000000000..966400e5940 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} 
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go new file mode 100644 index 00000000000..6298a674d49 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -0,0 +1,460 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. 
TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. 
+ // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. 
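+		// e.g. copying "foo.txt" onto an existing "/dst/bar.txt" rebases the
+		// archive entry "foo.txt" to "bar.txt".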
+ if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, depending
+// on whether symlinks should be followed: if followLink is true, resolvedPath
+// is the link target of any symlink in path; otherwise only symlinks in the
+// parent directory are resolved, and a symlink at the base is returned as-is
+// without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following symlinks, resolve symlinks in the parent directory only
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
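+		// e.g. "data/" resolving to the symlink target "real-data/" rebases
+		// archive entries from "real-data" back to "data".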
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go new file mode 100644 index 00000000000..e305b5e4af9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go new file mode 100644 index 00000000000..2b775b45c4f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go new file mode 100644 index 00000000000..14ffad5c0d4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -0,0 +1,257 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + buffer := make([]byte, 1<<20) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+		if runtime.GOOS == windows {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: since these operations are platform-specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = os.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: since these operations are platform-specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
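+			// Anything else on disk (file over dir, dir over file, file over
+			// file) is removed before the new entry is created.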
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go new file mode 100644 index 00000000000..3448569b1eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go new file mode 100644 index 00000000000..e85aac05408 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go new file mode 100644 index 00000000000..d20478a10dc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
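+// For example, an entry "dir/.wh..wh..opq" in a layer hides everything under
+// "dir" in the layers below it.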
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go
new file mode 100644
index 00000000000..b39d12c8780
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
new file mode 100644
index 00000000000..e874eb74e05
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -0,0 +1,186 @@
+package chrootarchive
+
+import (
+	stdtar "archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/user"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+	// environment, not in the chroot from untrusted files.
+	_, _ = user.Lookup("storage")
+	_, _ = net.LookupHost("localhost")
+}
+
+// NewArchiver returns a new Archiver which uses chrootarchive.Untar
+func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
+	archiver := archive.NewArchiver(idMappings)
+	archiver.Untar = Untar
+	return archiver
+}
+
+// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
+func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
+	archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
+	archiver.Untar = Untar
+	return archiver
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false, dest)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+		options.InUserNS = userns.RunningInUserNS()
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMappings.RootPair()
+
+	dest = filepath.Clean(dest)
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+		if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+			return err
+		}
+	}
+
+	r := ioutil.NopCloser(tarArchive)
+	if decompress {
+		decompressedArchive, err := archive.DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
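+// A nil options value is treated as an empty archive.TarOptions.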
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if options == nil { + options = &archive.TarOptions{} + } + return invokePack(srcPath, options, root) +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := stdtar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + } + hashWorker.Wait() + if err == nil { + err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000000..9da10fe33cd --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,207 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/pkg/errors"
+)
+
+// untar is the entry-point for storage-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence no point sandboxing through
+// chroot and re-exec.
+func untar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options archive.TarOptions
+
+	// read the options from the pipe "ExtraFiles"
+	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	dst := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = dst
+	}
+
+	if err := chroot(root); err != nil {
+		fatal(err)
+	}
+
+	if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
+		fatal(err)
+	}
+	// fully consume stdin in case it is zero padded
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	if root == "" {
+		return errors.New("must specify a root to chroot to")
+	}
+
+	// We can't pass a potentially large exclude list directly via cmd line
+	// because we easily overrun the kernel's max argument/environment size
+	// when the full image list is passed (e.g. when this is used by
+	// `docker load`). We will marshal the options via a pipe to the
+	// child
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("Untar pipe failure: %v", err)
+	}
+
+	if root != "" {
+		relDest, err := filepath.Rel(root, dest)
+		if err != nil {
+			return err
+		}
+		if relDest == "."
{ + relDest = "/" + } + if relDest[0] != '/' { + relDest = "/" + relDest + } + dest = relDest + } + + cmd := reexec.Command("storage-untar", dest, root) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} + +func tar() { + runtime.LockOSThread() + flag.Parse() + + src := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = src + } + + if err := realChroot(root); err != nil { + fatal(err) + } + + var options archive.TarOptions + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + rdr, err := archive.TarWithOptions(src, &options) + if err != nil { + fatal(err) + } + defer rdr.Close() + + if _, err := io.Copy(os.Stdout, rdr); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if root == "" { + return nil, errors.New("root path must not be empty") + } + + relSrc, err := filepath.Rel(root, srcPath) + if err != nil { + return nil, err + } + if relSrc == "." 
{ + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("storage-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "error getting options pipe for tar process") + } + + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "tar error on re-exec cmd") + } + + go func() { + err := cmd.Wait() + err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, errors.Wrap(err, "tar json encode to pipe failed") + } + stdin.Close() + + return tarR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go new file mode 100644 index 00000000000..8a5c680b146 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,29 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions, root string) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 00000000000..76c94c6c1e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,114 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/mount" + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + caps, err := capability.NewPid(0) + if err != nil { + return err + } + + // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot + if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { + return realChroot(path) + } + + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 00000000000..83278ee5051 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!linux + +package chrootarchive + +import "golang.org/x/sys/unix" + +func 
realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} + +func chroot(path string) error { + return realChroot(path) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go new file mode 100644 index 00000000000..68b8f74f775 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go new file mode 100644 index 00000000000..84253c6aa9b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,129 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/userns" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for storage-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := userns.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
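+// The parent/child exchange with the re-exec'd process (applyLayer above)
+// is, roughly: options travel as JSON in the OPT environment variable, the
+// layer tar streams over stdin, and the child reports the applied size as
+// JSON on stdout. An illustrative sketch of the parent side:
+//
+//	cmd := reexec.Command("storage-applyLayer", dest)
+//	cmd.Stdin = layer
+//	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", optionsJSON))
+//	// on success the child prints {"layerSize": N}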
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if userns.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("storage-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000000..8f8e88bfbea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000000..ea08135e4d5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("storage-applyLayer", applyLayer) + reexec.Register("storage-untar", untar) + reexec.Register("storage-tar", tar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000000..fa17c9bf831 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go new file mode 100644 index 00000000000..63f9704564c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go @@ -0,0 +1,5 @@ +package chrootarchive + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go new file mode 100644 index 00000000000..a931fb5d137 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -0,0 +1,630 @@ +package chunked + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + storage "github.com/containers/storage" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + jsoniter "github.com/json-iterator/go" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + cacheKey = "chunked-manifest-cache" + cacheVersion = 1 +) + +type metadata struct { + tagLen int + digestLen int + tags []byte + vdata []byte +} + +type layer struct { + id string + metadata *metadata + target string +} + +type layersCache struct { + layers []layer + refs int + store storage.Store + mutex sync.RWMutex + created time.Time +} + +var cacheMutex sync.Mutex +var cache *layersCache + +func (c *layersCache) release() { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + c.refs-- + if c.refs == 0 { + cache = nil + } +} + +func getLayersCacheRef(store storage.Store) *layersCache { + cacheMutex.Lock() + defer cacheMutex.Unlock() + if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + cache.refs++ + return cache + } + cache := &layersCache{ + store: store, + refs: 1, + created: time.Now(), + } + return cache +} + 
+func getLayersCache(store storage.Store) (*layersCache, error) { + c := getLayersCacheRef(store) + + if err := c.load(); err != nil { + c.release() + return nil, err + } + return c, nil +} + +func (c *layersCache) load() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + allLayers, err := c.store.Layers() + if err != nil { + return err + } + existingLayers := make(map[string]string) + for _, r := range c.layers { + existingLayers[r.id] = r.target + } + + currentLayers := make(map[string]string) + for _, r := range allLayers { + currentLayers[r.ID] = r.ID + if _, found := existingLayers[r.ID]; found { + continue + } + + bigData, err := c.store.LayerBigData(r.ID, cacheKey) + if err != nil { + if errors.Cause(err) == os.ErrNotExist { + continue + } + return err + } + defer bigData.Close() + + metadata, err := readMetadataFromCache(bigData) + if err != nil { + logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) + } + + if metadata != nil { + c.addLayer(r.ID, metadata) + continue + } + + manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + if err != nil { + continue + } + defer manifestReader.Close() + manifest, err := ioutil.ReadAll(manifestReader) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + + metadata, err = writeCache(manifest, r.ID, c.store) + if err == nil { + c.addLayer(r.ID, metadata) + } + } + + var newLayers []layer + for _, l := range c.layers { + if _, found := currentLayers[l.id]; found { + newLayers = append(newLayers, l) + } + } + c.layers = newLayers + + return nil +} + +// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file +// is usable for deduplication with hardlinks. +// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. +func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { + digester := digest.Canonical.Digester() + + modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) + hash := digester.Hash() + + if _, err := hash.Write([]byte(f.Digest)); err != nil { + return "", err + } + + if _, err := hash.Write([]byte(modeString)); err != nil { + return "", err + } + + if len(f.Xattrs) > 0 { + keys := make([]string, 0, len(f.Xattrs)) + for k := range f.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if _, err := hash.Write([]byte(k)); err != nil { + return "", err + } + if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil { + return "", err + } + } + } + return string(digester.Digest()), nil +} + +// generateFileLocation generates a file location in the form $OFFSET@$PATH +func generateFileLocation(path string, offset uint64) []byte { + return []byte(fmt.Sprintf("%d@%s", offset, path)) +} + +// generateTag generates a tag in the form $DIGEST$OFFSET@LEN. +// the [OFFSET; LEN] points to the variable length data where the file locations +// are stored. $DIGEST has length digestLen stored in the metadata file header. +func generateTag(digest string, offset, len uint64) string { + return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) +} + +type setBigData interface { + // SetLayerBigData stores a (possibly large) chunk of named data + SetLayerBigData(id, key string, data io.Reader) error +} + +// writeCache write a cache for the layer ID. +// It generates a sorted list of digests with their offset to the path location and offset. +// The same cache is used to lookup files, chunks and candidates for deduplication with hard links. 
+// There are 3 kind of digests stored: +// - digest(file.payload)) +// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) +// - digest(i) for each i in chunks(file payload) +func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) { + var vdata bytes.Buffer + tagLen := 0 + digestLen := 0 + var tagsBuffer bytes.Buffer + + toc, err := prepareMetadata(manifest) + if err != nil { + return nil, err + } + + var tags []string + for _, k := range toc { + if k.Digest != "" { + location := generateFileLocation(k.Name, 0) + + off := uint64(vdata.Len()) + l := uint64(len(location)) + + d := generateTag(k.Digest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + fp, err := calculateHardLinkFingerprint(k) + if err != nil { + return nil, err + } + d = generateTag(fp, off, l) + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + + digestLen = len(k.Digest) + } + if k.ChunkDigest != "" { + location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + off := uint64(vdata.Len()) + l := uint64(len(location)) + d := generateTag(k.ChunkDigest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + digestLen = len(k.ChunkDigest) + } + } + + sort.Strings(tags) + + for _, t := range tags { + if _, err := tagsBuffer.Write([]byte(t)); err != nil { + return nil, err + } + } + + pipeReader, pipeWriter := io.Pipe() + errChan := make(chan error, 1) + go func() { + defer pipeWriter.Close() + defer close(errChan) + + // version + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { + errChan <- err + return + } + + // len of a tag + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { + errChan <- err + return + } + + // len of a digest + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { + errChan <- err + return + } + + // tags length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + errChan <- err + return + } + + // vdata length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { + errChan <- err + return + } + + // tags + if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { + errChan <- err + return + } + + // variable length data + if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { + errChan <- err + return + } + + errChan <- nil + }() + defer pipeReader.Close() + + counter := ioutils.NewWriteCounter(ioutil.Discard) + + r := io.TeeReader(pipeReader, counter) + + if err := dest.SetLayerBigData(id, cacheKey, r); err != nil { + return nil, err + } + + if err := <-errChan; err != nil { + return nil, err + } + + logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) + + return &metadata{ + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + }, nil +} + +func readMetadataFromCache(bigData io.Reader) (*metadata, error) { + var version, tagLen, digestLen, tagsLen, vdataLen uint64 + if err := binary.Read(bigData, binary.LittleEndian, &version); 
err != nil { + return nil, err + } + if version != cacheVersion { + return nil, nil + } + if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil { + return nil, err + } + + tags := make([]byte, tagsLen) + if _, err := bigData.Read(tags); err != nil { + return nil, err + } + + vdata := make([]byte, vdataLen) + if _, err := bigData.Read(vdata); err != nil { + return nil, err + } + + return &metadata{ + tagLen: int(tagLen), + digestLen: int(digestLen), + tags: tags, + vdata: vdata, + }, nil +} + +func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { + toc, err := unmarshalToc(manifest) + if err != nil { + // ignore errors here. They might be caused by a different manifest format. + return nil, nil + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
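+// useHardLinks selects matching on the hard-link fingerprint (payload digest
+// plus UID/GID/mode/xattrs) rather than on the payload digest alone.
+// An illustrative sketch of a caller:
+//
+//	target, name, err := c.findFileInOtherLayers(file, useHardLinks)
+//	if err == nil && target != "" {
+//		// reuse the copy at name inside the layer checkout at target
+//	}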
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { + digest := file.Digest + if useHardLinks { + var err error + digest, err = calculateHardLinkFingerprint(file) + if err != nil { + return "", "", err + } + } + target, name, off, err := c.findDigestInternal(digest) + if off == 0 { + return target, name, err + } + return "", "", nil +} + +func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { + return c.findDigestInternal(chunk.ChunkDigest) +} + +func unmarshalToc(manifest []byte) (*internal.TOC, error) { + var buf bytes.Buffer + count := 0 + var toc internal.TOC + + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + count += len(iter.ReadStringAsSlice()) + case "xattrs": + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + count += len(iter.ReadStringAsSlice()) + } + default: + iter.Skip() + } + } + } + break + } + + buf.Grow(count) + + getString := func(b []byte) string { + from := buf.Len() + buf.Write(b) + to := buf.Len() + return byteSliceAsString(buf.Bytes()[from:to]) + } + + iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field == "version" { + toc.Version = iter.ReadInt() + continue + } + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type": + m.Type = getString(iter.ReadStringAsSlice()) + case "name": + m.Name = getString(iter.ReadStringAsSlice()) + case "linkName": + m.Linkname = getString(iter.ReadStringAsSlice()) + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "UID": + m.UID = iter.ReadInt() + case "GID": + m.GID = iter.ReadInt() + case "ModTime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devMajor": + m.Devmajor = iter.ReadInt64() + case "devMinor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = getString(iter.ReadStringAsSlice()) + case "offset": + m.Offset = iter.ReadInt64() + case "endOffset": + m.EndOffset = iter.ReadInt64() + case "chunkSize": + m.ChunkSize = iter.ReadInt64() + case "chunkOffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkDigest": + m.ChunkDigest = getString(iter.ReadStringAsSlice()) + case "chunkType": + m.ChunkType = getString(iter.ReadStringAsSlice()) + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + value := iter.ReadStringAsSlice() + m.Xattrs[key] = getString(value) + } + default: + iter.Skip() + } + } + toc.Entries = append(toc.Entries, m) + } + break + } + 
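// every string parsed above aliases buf's backing array, so the TOC must keep it reachable
+	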
toc.StringsBuf = buf + return &toc, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go new file mode 100644 index 00000000000..96254bc4e54 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -0,0 +1,277 @@ +package chunked + +import ( + archivetar "archive/tar" + "bytes" + "encoding/binary" + "fmt" + "io" + "strconv" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/archive/tar" +) + +const ( + TypeReg = internal.TypeReg + TypeChunk = internal.TypeChunk + TypeLink = internal.TypeLink + TypeChar = internal.TypeChar + TypeBlock = internal.TypeBlock + TypeDir = internal.TypeDir + TypeFifo = internal.TypeFifo + TypeSymlink = internal.TypeSymlink +) + +var typesToTar = map[string]byte{ + TypeReg: tar.TypeReg, + TypeLink: tar.TypeLink, + TypeChar: tar.TypeChar, + TypeBlock: tar.TypeBlock, + TypeDir: tar.TypeDir, + TypeFifo: tar.TypeFifo, + TypeSymlink: tar.TypeSymlink, +} + +func typeToTarType(t string) (byte, error) { + r, found := typesToTar[t] + if !found { + return 0, fmt.Errorf("unknown type: %v", t) + } + return r, nil +} + +func isZstdChunkedFrameMagic(data []byte) bool { + if len(data) < 8 { + return false + } + return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8]) +} + +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md + footerSize := int64(51) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + defer reader.Close() + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + /* Read the ToC offset: + - 10 bytes gzip header + - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + - 2 bytes Extra: SI1 = 'S', SI2 = 'G' + - 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + - 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + - 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0 + - 8 bytes gzip footer + */ + tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64) + if err != nil { + return nil, 0, errors.Wrap(err, "parse ToC offset") + } + + size := int64(blobSize - footerSize - tocOffset) + // set a reasonable limit + if size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk = ImageSourceChunk{ + Offset: uint64(tocOffset), + Length: uint64(size), + } + parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + + var tocReader io.ReadCloser + select { + case r := <-parts: + tocReader = r + case err := <-errs: + return 
nil, 0, err + } + defer tocReader.Close() + + r, err := pgzip.NewReader(tocReader) + if err != nil { + return nil, 0, err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return nil, 0, err + } + // set a reasonable limit + if header.Size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + manifestUncompressed := make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return nil, 0, err + } + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(manifestUncompressed); err != nil { + return nil, 0, err + } + + d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, 0, err + } + if manifestDigester.Digest() != d { + return nil, 0, errors.New("invalid manifest checksum") + } + + return manifestUncompressed, tocOffset, nil +} + +// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must +// be specified. +// This function uses the io.containers.zstd-chunked. annotations when specified. +func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + footerSize := int64(internal.FooterSizeSupported) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + + manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] + if manifestChecksumAnnotation == "" { + return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) + } + + var offset, length, lengthUncompressed, manifestType uint64 + + if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { + return nil, 0, err + } + } else { + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + offset = binary.LittleEndian.Uint64(footer[0:8]) + length = binary.LittleEndian.Uint64(footer[8:16]) + lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) + manifestType = binary.LittleEndian.Uint64(footer[24:32]) + if !isZstdChunkedFrameMagic(footer[32:40]) { + return nil, 0, errors.New("invalid magic number") + } + } + + if manifestType != internal.ManifestTypeCRFS { + return nil, 0, errors.New("invalid manifest type") + } + + // set a reasonable limit + if length > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + if lengthUncompressed > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk := ImageSourceChunk{ + Offset: offset, + Length: length, + } + + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + + manifest := make([]byte, length) + if _, err := io.ReadFull(reader, manifest); err != nil { + return nil, 0, err + } + + manifestDigester := 
digest.Canonical.Digester()
+	manifestChecksum := manifestDigester.Hash()
+	if _, err := manifestChecksum.Write(manifest); err != nil {
+		return nil, 0, err
+	}
+
+	d, err := digest.Parse(manifestChecksumAnnotation)
+	if err != nil {
+		return nil, 0, err
+	}
+	if manifestDigester.Digest() != d {
+		return nil, 0, errors.New("invalid manifest checksum")
+	}
+
+	decoder, err := zstd.NewReader(nil)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer decoder.Close()
+
+	b := make([]byte, 0, lengthUncompressed)
+	if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
+		return decoded, int64(offset), nil
+	}
+
+	return manifest, int64(offset), nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return compressor.ZstdCompressor(r, metadata, level)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 00000000000..aeb7cfd4f01
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,454 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+	"bufio"
+	"encoding/base64"
+	"io"
+	"io/ioutil"
+
+	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole. Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF. Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
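+// An illustrative sketch of how the holes finder is consumed (the real loop
+// is rollingChecksumReader.Read below); holes come back as a count, data as
+// single bytes:
+//
+//	hf := &holesFinder{threshold: holesThreshold, reader: bufio.NewReader(r)}
+//	for {
+//		holeLen, b, err := hf.ReadByte()
+//		if err != nil {
+//			break // io.EOF ends the stream
+//		}
+//		if holeLen > 0 {
+//			// a run of holeLen zero bytes; may become a "zeros" chunk
+//		} else {
+//			_ = b // one literal byte of file data
+//		}
+//	}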
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros). If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
+	IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+	rc.IsLastChunkZeros = false
+
+	if rc.pendingHole > 0 {
+		toCopy := int64(len(b))
+		if rc.pendingHole < toCopy {
+			toCopy = rc.pendingHole
+		}
+		rc.pendingHole -= toCopy
+		for i := int64(0); i < toCopy; i++ {
+			b[i] = 0
+		}
+
+		rc.WrittenOut += toCopy
+
+		rc.IsLastChunkZeros = true
+
+		// if there are no other zeros left, terminate the chunk
+		return rc.pendingHole == 0, int(toCopy), nil
+	}
+
+	if rc.closed {
+		return false, 0, io.EOF
+	}
+
+	for i := 0; i < len(b); i++ {
+		holeLen, n, err := rc.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				rc.closed = true
+				if i == 0 {
+					return false, 0, err
+				}
+				return false, i, nil
+			}
+			// Report any other error type
+			return false, -1, err
+		}
+		if holeLen > 0 {
+			for j := int64(0); j < holeLen; j++ {
+				rc.rollsum.Roll(0)
+			}
+			rc.pendingHole = holeLen
+			return true, i, nil
+		}
+		b[i] = n
+		rc.WrittenOut++
+		rc.rollsum.Roll(n)
+		if rc.rollsum.OnSplitWithBits(RollsumBits) {
+			return true, i + 1, nil
+		}
+	}
+	return false, len(b), nil
+}
+
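+// Read drives the content-defined chunking. A minimal sketch of the split
+// loop (writeZstdChunkedStream below is the real consumer):
+//
+//	for {
+//		mustSplit, n, err := rc.Read(buf) // n payload bytes produced
+//		if mustSplit {
+//			// flush and restart the zstd frame so this chunk is
+//			// independently addressable, then record its offsets
+//		}
+//		if err == io.EOF {
+//			break
+//		}
+//	}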
+
+type chunk struct {
+	ChunkOffset int64
+	Offset      int64
+	Checksum    string
+	ChunkSize   int64
+	ChunkType   string
+}
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+	// total written so far. Used to retrieve partial offsets in the file
+	dest := ioutils.NewWriteCounter(destFile)
+
+	tr := tar.NewReader(reader)
+	tr.RawAccounting = true
+
+	buf := make([]byte, 4096)
+
+	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if zstdWriter != nil {
+			zstdWriter.Close()
+			zstdWriter.Flush()
+		}
+	}()
+
+	restartCompression := func() (int64, error) {
+		var offset int64
+		if zstdWriter != nil {
+			if err := zstdWriter.Close(); err != nil {
+				return 0, err
+			}
+			if err := zstdWriter.Flush(); err != nil {
+				return 0, err
+			}
+			offset = dest.Count
+			zstdWriter.Reset(dest)
+		}
+		return offset, nil
+	}
+
+	var metadata []internal.FileMetadata
+	for {
+		hdr, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		rawBytes := tr.RawBytes()
+		if _, err := zstdWriter.Write(rawBytes); err != nil {
+			return err
+		}
+
+		payloadDigester := digest.Canonical.Digester()
+		chunkDigester := digest.Canonical.Digester()
+
+		// Now handle the payload, if any
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
+		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+		for {
+			mustSplit, read, errRead := rcReader.Read(buf)
+			if errRead != nil && errRead != io.EOF {
+				return errRead
+			}
+			// restart the compression only if there is a payload.
+			if read > 0 {
+				if startOffset == 0 {
+					startOffset, err = restartCompression()
+					if err != nil {
+						return err
+					}
+					lastOffset = startOffset
+				}
+
+				if _, err := payloadDest.Write(buf[:read]); err != nil {
+					return err
+				}
+			}
+			if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+				off, err := restartCompression()
+				if err != nil {
+					return err
+				}
+
+				chunkSize := rcReader.WrittenOut - lastChunkOffset
+				if chunkSize > 0 {
+					chunkType := internal.ChunkTypeData
+					if rcReader.IsLastChunkZeros {
+						chunkType = internal.ChunkTypeZeros
+					}
+
+					chunks = append(chunks, chunk{
+						ChunkOffset: lastChunkOffset,
+						Offset:      lastOffset,
+						Checksum:    chunkDigester.Digest().String(),
+						ChunkSize:   chunkSize,
+						ChunkType:   chunkType,
+					})
+				}
+
+				lastOffset = off
+				lastChunkOffset = rcReader.WrittenOut
+				chunkDigester = digest.Canonical.Digester()
+				payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+			}
+			if errRead == io.EOF {
+				if startOffset > 0 {
+					checksum = payloadDigester.Digest().String()
+				}
+				break
+			}
+		}
+
+		typ, err := internal.GetType(hdr.Typeflag)
+		if err != nil {
+			return err
+		}
+		xattrs := make(map[string]string)
+		for k, v := range hdr.Xattrs {
+			xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
+		}
+		entries := []internal.FileMetadata{
+			{
+				Type:       typ,
+				Name:       hdr.Name,
+				Linkname:   hdr.Linkname,
+				Mode:       hdr.Mode,
+				Size:       hdr.Size,
+				UID:        hdr.Uid,
+				GID:        hdr.Gid,
+				ModTime:    &hdr.ModTime,
+				AccessTime: &hdr.AccessTime,
+				ChangeTime: &hdr.ChangeTime,
+				Devmajor:   hdr.Devmajor,
+				Devminor:   hdr.Devminor,
+				Xattrs:     xattrs,
+				Digest:     checksum,
+				Offset:     startOffset,
+				EndOffset:  lastOffset,
+			},
+		}
+		for i := 1; i < len(chunks); i++ {
+			entries = append(entries, internal.FileMetadata{
+				Type:        internal.TypeChunk,
+				Name:        hdr.Name,
+				ChunkOffset: chunks[i].ChunkOffset,
+			})
+		}
+		if len(chunks) > 1 {
+			for i := range chunks {
+				entries[i].ChunkSize = 
chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) + } + + rawBytes := tr.RawBytes() + if _, err := zstdWriter.Write(rawBytes); err != nil { + return err + } + if err := zstdWriter.Flush(); err != nil { + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + zstdWriter = nil + + return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level) +} + +type zstdChunkedWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w zstdChunkedWriter) Close() error { + err := <-w.tarSplitErr + if err != nil { + w.tarSplitOut.Close() + return err + } + return w.tarSplitOut.Close() +} + +func (w zstdChunkedWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. +// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] +// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. +func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + ch <- writeZstdChunkedStream(out, metadata, r, level) + io.Copy(ioutil.Discard, r) + r.Close() + close(ch) + }() + + return zstdChunkedWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. +func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + l := 10 + level = &l + } + + return zstdChunkedWriterWithLevel(r, metadata, *level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 00000000000..f4dfad822e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const windowSize = 64 // Roll assumes windowSize is a power of 2 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. +func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go new file mode 100644 index 00000000000..3bb5286d92d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -0,0 +1,184 @@ +package internal + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. 
+ +import ( + "archive/tar" + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zstd" + "github.com/opencontainers/go-digest" +) + +type TOC struct { + Version int `json:"version"` + Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` +} + +type FileMetadata struct { + Type string `json:"type"` + Name string `json:"name"` + Linkname string `json:"linkName,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` + Xattrs map[string]string `json:"xattrs,omitempty"` + Digest string `json:"digest,omitempty"` + Offset int64 `json:"offset,omitempty"` + EndOffset int64 `json:"endOffset,omitempty"` + + ChunkSize int64 `json:"chunkSize,omitempty"` + ChunkOffset int64 `json:"chunkOffset,omitempty"` + ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. + Chunks []*FileMetadata `json:"-"` +} + +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + +const ( + TypeReg = "reg" + TypeChunk = "chunk" + TypeLink = "hardlink" + TypeChar = "char" + TypeBlock = "block" + TypeDir = "dir" + TypeFifo = "fifo" + TypeSymlink = "symlink" +) + +var TarTypes = map[byte]string{ + tar.TypeReg: TypeReg, + tar.TypeRegA: TypeReg, + tar.TypeLink: TypeLink, + tar.TypeChar: TypeChar, + tar.TypeBlock: TypeBlock, + tar.TypeDir: TypeDir, + tar.TypeFifo: TypeFifo, + tar.TypeSymlink: TypeSymlink, +} + +func GetType(t byte) (string, error) { + r, found := TarTypes[t] + if !found { + return "", fmt.Errorf("unknown tarball type: %v", t) + } + return r, nil +} + +const ( + ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum" + ManifestInfoKey = "io.containers.zstd-chunked.manifest-position" + + // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. + ManifestTypeCRFS = 1 + + // FooterSizeSupported is the footer size supported by this implementation. + // Newer versions of the image format might increase this value, so reject + // any version that is not supported. + FooterSizeSupported = 40 +) + +var ( + // when the zstd decoder encounters a skippable frame + 1 byte for the size, it + // will ignore it. 
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} + + ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} +) + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + var size []byte = make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + return nil +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error { + // 8 is the size of the zstd skippable frame header + the frame size + manifestOffset := offset + 8 + + toc := TOC{ + Version: 1, + Entries: metadata, + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary + // Generate the manifest + manifest, err := json.Marshal(toc) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + compressedManifest := compressedBuffer.Bytes() + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(compressedManifest); err != nil { + return err + } + + outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String() + outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS) + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + // Store the offset to the manifest and its size in LE order + var manifestDataLE []byte = make([]byte, FooterSizeSupported) + binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) + binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest))) + binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest))) + binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS)) + copy(manifestDataLE[32:], ZstdChunkedFrameMagic) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go new file mode 100644 index 00000000000..9212cbbcff8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go @@ -0,0 +1,26 @@ +package chunked + +import ( + "fmt" + "io" +) + +// ImageSourceChunk is a portion of a blob. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. +type ImageSourceSeekable interface { + // GetBlobAt returns a stream for the specified blob. 
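+	// Each requested chunk is delivered, in order, on the first channel;
+	// failures arrive on the second. An illustrative sketch of a caller:
+	//
+	//	parts, errs, err := iss.GetBlobAt(chunks)
+	//	select {
+	//	case r := <-parts:
+	//		// consume and close r
+	//	case err := <-errs:
+	//		// the fetch failed
+	//	}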
+ GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error) +} + +// ErrBadRequest is returned when the request is not valid +type ErrBadRequest struct { +} + +func (e ErrBadRequest) Error() string { + return fmt.Sprintf("bad request") +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go new file mode 100644 index 00000000000..92b15c2bfdd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -0,0 +1,1774 @@ +package chunked + +import ( + archivetar "archive/tar" + "context" + "encoding/base64" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/containerd/stargz-snapshotter/estargz" + storage "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + driversCopy "github.com/containers/storage/drivers/copy" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" + "golang.org/x/sys/unix" +) + +const ( + maxNumberMissingChunks = 1024 + newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY) + containersOverrideXattr = "user.containers.override_stat" + bigDataKey = "zstd-chunked-manifest" + + fileTypeZstdChunked = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 +) + +type compressedFileType int + +type chunkedDiffer struct { + stream ImageSourceSeekable + manifest []byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte + + gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return unix.NsecToTimespec(time.UnixNano()) +} + +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. 
+		srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
+		return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
+	}
+
+	err := doLink()
+
+	// if the destination exists, unlink it first and try again
+	if err != nil && os.IsExist(err) {
+		unix.Unlinkat(destDirFd, destBase, 0)
+		return doLink()
+	}
+	return err
+}
+
+func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
+	src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
+	st, err := os.Stat(src)
+	if err != nil {
+		return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
+	}
+
+	copyWithFileRange, copyWithFileClone := true, true
+
+	if useHardLinks {
+		destDirPath := filepath.Dir(destFile)
+		destBase := filepath.Base(destFile)
+		destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode)
+		if err == nil {
+			defer destDir.Close()
+
+			err := doHardLink(srcFd, int(destDir.Fd()), destBase)
+			if err == nil {
+				return nil, st.Size(), nil
+			}
+		}
+	}
+
+	// If the destination file already exists, we shouldn't blow it away
+	dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
+	if err != nil {
+		return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
+	}
+
+	err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
+	if err != nil {
+		dstFile.Close()
+		return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
+	}
+	return dstFile, st.Size(), nil
+}
+
+// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
+func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+	if _, ok := annotations[internal.ManifestChecksumKey]; ok {
+		return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
+	}
+	if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
+		return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss)
+	}
+	return nil, errors.New("blob type not supported for partial retrieval")
+}
+
+func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
+	manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
+	if err != nil {
+		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
+	}
+	layersCache, err := getLayersCache(store)
+	if err != nil {
+		return nil, err
+	}
+
+	return &chunkedDiffer{
+		copyBuffer:  makeCopyBuffer(),
+		stream:      iss,
+		manifest:    manifest,
+		layersCache: layersCache,
+		tocOffset:   tocOffset,
+		fileType:    fileTypeZstdChunked,
+	}, nil
+}
+
+func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
+	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
+	if err != nil {
+		return nil, fmt.Errorf("read estargz manifest: %w", err)
+	}
+	layersCache, err := getLayersCache(store)
+	if err != nil {
+		return nil, err
+	}
+
+	return &chunkedDiffer{
+		copyBuffer:  makeCopyBuffer(),
+		stream:      iss,
+		manifest:    manifest,
+		layersCache: layersCache,
+		tocOffset:   tocOffset,
+		fileType:    fileTypeEstargz,
+	}, nil
+}
+
+func makeCopyBuffer() []byte {
+	return make([]byte, 2<<20)
+}
+
+// copyFileFromOtherLayer copies a file from another layer
+// file is the file to look for.
+// source is the path to the source layer checkout.
+// name is the path to the file to copy in source.
+// dirfd is an open file descriptor to the destination root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+	srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
+	if err != nil {
+		return false, nil, 0, fmt.Errorf("open source file: %w", err)
+	}
+	defer unix.Close(srcDirfd)
+
+	srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
+	if err != nil {
+		return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
+	}
+	defer srcFile.Close()
+
+	dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
+	if err != nil {
+		return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
+	}
+	return true, dstFile, written, nil
+}
+
+// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
+// It checks that the two files have the same UID, GID, file mode and xattrs.
+func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool {
+	if file.UID != otherFile.UID {
+		return false
+	}
+	if file.GID != otherFile.GID {
+		return false
+	}
+	if file.Mode != otherFile.Mode {
+		return false
+	}
+	if !reflect.DeepEqual(file.Xattrs, otherFile.Xattrs) {
+		return false
+	}
+	return true
+}
+
+// canDedupFileWithHardLink checks if the specified file can be deduplicated by an
+// open file, given its descriptor and stat data.
+func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool {
+	st, ok := s.Sys().(*syscall.Stat_t)
+	if !ok {
+		return false
+	}
+
+	path := fmt.Sprintf("/proc/self/fd/%d", fd)
+
+	listXattrs, err := system.Llistxattr(path)
+	if err != nil {
+		return false
+	}
+
+	xattrs := make(map[string]string)
+	for _, x := range listXattrs {
+		v, err := system.Lgetxattr(path, x)
+		if err != nil {
+			return false
+		}
+
+		if _, found := xattrsToIgnore[x]; found {
+			continue
+		}
+		xattrs[x] = string(v)
+	}
+	// fill only the attributes used by canDedupMetadataWithHardLink.
+	otherFile := internal.FileMetadata{
+		UID:    int(st.Uid),
+		GID:    int(st.Gid),
+		Mode:   int64(st.Mode),
+		Xattrs: xattrs,
+	}
+	return canDedupMetadataWithHardLink(file, &otherFile)
+}
+
+func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) {
+	digester := digest.Canonical.Digester()
+	if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil {
+		return "", err
+	}
+	return digester.Digest(), nil
+}
+
+// findFileInOSTreeRepos checks whether the requested file already exists in one of the OSTree repos and copies the file content from there if possible.
+// file is the file to look for.
+// ostreeRepos is a list of OSTree repos.
+// dirfd is an open fd to the destination checkout.
+// useHardLinks defines whether the deduplication can be performed using hard links.
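+// The lookup follows the OSTree object layout: a file whose digest encodes to
+// "abcd1234..." is probed at <repo>/objects/ab/cd1234....payload-link in each
+// configured repo (illustrative path; see the filepath.Join below).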
+func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+	digest, err := digest.Parse(file.Digest)
+	if err != nil {
+		return false, nil, 0, nil
+	}
+	payloadLink := digest.Encoded() + ".payload-link"
+	if len(payloadLink) < 2 {
+		return false, nil, 0, nil
+	}
+
+	for _, repo := range ostreeRepos {
+		sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
+		st, err := os.Stat(sourceFile)
+		if err != nil || !st.Mode().IsRegular() {
+			continue
+		}
+		if st.Size() != file.Size {
+			continue
+		}
+		fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
+		if err != nil {
+			return false, nil, 0, nil
+		}
+		f := os.NewFile(uintptr(fd), "fd")
+		defer f.Close()
+
+		// check if the open file can be deduplicated with hard links
+		if useHardLinks && !canDedupFileWithHardLink(file, fd, st) {
+			continue
+		}
+
+		dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
+		if err != nil {
+			return false, nil, 0, nil
+		}
+		return true, dstFile, written, nil
+	}
+	// If hard links deduplication was used and it has failed, try again without hard links.
+	if useHardLinks {
+		return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false)
+	}
+
+	return false, nil, 0, nil
+}
+
+// findFileOnTheHost checks whether the requested file already exists on the host and copies the file content from there if possible.
+// It is currently implemented to look only at the file with the same path. Ideally it could also detect the same content at different
+// paths.
+// file is the file to look for.
+// dirfd is an open fd to the destination checkout.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) {
+	sourceFile := filepath.Clean(filepath.Join("/", file.Name))
+	if !strings.HasPrefix(sourceFile, "/usr/") {
+		// limit host deduplication to files under /usr.
+		return false, nil, 0, nil
+	}
+
+	st, err := os.Stat(sourceFile)
+	if err != nil || !st.Mode().IsRegular() {
+		return false, nil, 0, nil
+	}
+
+	if st.Size() != file.Size {
+		return false, nil, 0, nil
+	}
+
+	fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
+	if err != nil {
+		return false, nil, 0, nil
+	}
+
+	f := os.NewFile(uintptr(fd), "fd")
+	defer f.Close()
+
+	manifestChecksum, err := digest.Parse(file.Digest)
+	if err != nil {
+		return false, nil, 0, err
+	}
+
+	checksum, err := getFileDigest(f, buf)
+	if err != nil {
+		return false, nil, 0, err
+	}
+
+	if checksum != manifestChecksum {
+		return false, nil, 0, nil
+	}
+
+	// check if the open file can be deduplicated with hard links
+	useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st)
+
+	dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
+	if err != nil {
+		return false, nil, 0, nil
+	}
+
+	// calculate the checksum again to make sure the file wasn't modified while it was copied
+	if _, err := f.Seek(0, 0); err != nil {
+		dstFile.Close()
+		return false, nil, 0, err
+	}
+	checksum, err = getFileDigest(f, buf)
+	if err != nil {
+		dstFile.Close()
+		return false, nil, 0, err
+	}
+	if checksum != manifestChecksum {
+		dstFile.Close()
+		return false, nil, 0, nil
+	}
+	return true, dstFile, written, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// cache is the layers cache to use.
+// file is the file to look for.
+// dirfd is an open file descriptor to the checkout root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + +func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { + if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { + return nil + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + for i := range manifest { + if options.ChownOpts != nil { + manifest[i].UID = options.ChownOpts.UID + manifest[i].GID = options.ChownOpts.GID + } else { + pair := idtools.IDPair{ + UID: manifest[i].UID, + GID: manifest[i].GID, + } + var err error + manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair) + if err != nil { + return err + } + } + } + return nil +} + +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { + Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 +} + +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk +} + +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil +} + +// setFileAttrs sets the file attributes for file given metadata +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { + if file == nil || file.Fd() < 0 { + return errors.Errorf("invalid file") + } + fd := int(file.Fd()) + + t, err := typeToTarType(metadata.Type) + if err != nil { + return err + } + + // If it is a symlink, force to use the path + if t == tar.TypeSymlink { + usePath = true + } + + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) + } + + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), 
 unix.AT_SYMLINK_NOFOLLOW)
+		}
+		return unix.Fchmod(fd, uint32(mode))
+	}
+
+	if err := doChown(); err != nil {
+		if !options.IgnoreChownErrors {
+			return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err)
+		}
+	}
+
+	canIgnore := func(err error) bool {
+		return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
+	}
+
+	for k, v := range metadata.Xattrs {
+		if _, found := xattrsToIgnore[k]; found {
+			continue
+		}
+		data, err := base64.StdEncoding.DecodeString(v)
+		if err != nil {
+			return fmt.Errorf("decode xattr %q: %w", v, err)
+		}
+		if err := doSetXattr(k, data); !canIgnore(err) {
+			return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
+		}
+	}
+
+	if err := doUtimes(); !canIgnore(err) {
+		return fmt.Errorf("set utimes for %q: %w", metadata.Name, err)
+	}
+
+	if err := doChmod(); !canIgnore(err) {
+		return fmt.Errorf("chmod %q: %w", metadata.Name, err)
+	}
+	return nil
+}
+
+func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	root := fmt.Sprintf("/proc/self/fd/%d", dirfd)
+
+	targetRoot, err := os.Readlink(root)
+	if err != nil {
+		return -1, err
+	}
+
+	hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
+
+	fd := -1
+	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
+	// last component as the path to openat().
+	if hasNoFollow {
+		dirName := filepath.Dir(name)
+		if dirName != "" {
+			newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
+			if err != nil {
+				return -1, err
+			}
+			root = newRoot
+		}
+
+		parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+		if err != nil {
+			return -1, err
+		}
+		defer unix.Close(parentDirfd)
+
+		fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
+		if err != nil {
+			return -1, err
+		}
+	} else {
+		newPath, err := securejoin.SecureJoin(root, name)
+		if err != nil {
+			return -1, err
+		}
+		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
+	if err != nil {
+		unix.Close(fd)
+		return -1, err
+	}
+
+	// Add an additional check to make sure the opened fd is inside the rootfs
+	if !strings.HasPrefix(target, targetRoot) {
+		unix.Close(fd)
+		return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name)
+	}
+
+	return fd, err
+}
+
+func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	how := unix.OpenHow{
+		Flags:   flags,
+		Mode:    uint64(mode & 07777),
+		Resolve: unix.RESOLVE_IN_ROOT,
+	}
+	return unix.Openat2(dirfd, name, &how)
+}
+
+// skipOpenat2 is set when openat2 is not supported by the underlying kernel, to avoid
+// trying it again.
+var skipOpenat2 int32
+
+// openFileUnderRootRaw tries to open a file using openat2 and, if it is not supported, falls back to a
+// userspace lookup.
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	var fd int
+	var err error
+	if atomic.LoadInt32(&skipOpenat2) > 0 {
+		fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+	} else {
+		fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
+		// If the function failed with ENOSYS, switch off the support for openat2
+		// and fall back to using securejoin.
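+		// The switch below is a one-way latch: once skipOpenat2 is set, every
+		// later call goes straight to the userspace fallback.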
+ if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&skipOpenat2, 1) + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } + } + return fd, err +} + +// openFileUnderRoot safely opens a file under the specified root directory using openat2 +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// flags are the flags to pass to the open syscall. +// mode specifies the mode to use for newly created files. +func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} + +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() + + baseName := filepath.Base(name) + + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
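+		// The bytes were still transferred as part of the multirange response,
+		// so they are read and dropped here instead of being written out.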
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } + } + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.gzipReader == nil { + r, err := pgzip.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.gzipReader = r + } else { + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err + } + } + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} + +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { + return err + } + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
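+	// The forward seek leaves an unwritten gap that reads back as zeros; the
+	// truncate below extends the file in case the hole is the trailing data.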
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { + return err + } + default: + return fmt.Errorf("unknown file type %q", c.fileType) + } + return nil +} + +type destinationFile struct { + dirfd int + file *os.File + digester digest.Digester + hash hash.Hash + to io.Writer + metadata *internal.FileMetadata + options *archive.TarOptions +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + to := io.MultiWriter(file, hash) + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + }, nil +} + +func (d *destinationFile) Close() error { + manifestChecksum, err := digest.Parse(d.metadata.Digest) + if err != nil { + return err + } + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) + } + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) +} + +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() + } + close(errors) +} + +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e + } + } + }() + + for _, missingPart := range missingParts { + var part io.ReadCloser + partCompression := c.fileType + switch { + case missingPart.Hole: + partCompression = fileTypeHole + case missingPart.OriginFile != nil: + var err error + part, err = missingPart.OriginFile.OpenFile() + if err != nil { + return err + } + partCompression = fileTypeNoCompression + case missingPart.SourceChunk != nil: + select { + case p := <-streams: + part = p + case err := <-errs: + return err + } + if part == nil { + return errors.Errorf("invalid stream returned") + } + default: + return errors.Errorf("internal error: missing part misses both local and remote data stream") + } + + for _, mf := range missingPart.Chunks { + if mf.Gap > 0 { + limitReader := io.LimitReader(part, 
 mf.Gap)
+				_, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
+				if err != nil {
+					Err = err
+					goto exit
+				}
+				continue
+			}
+
+			if mf.File.Name == "" {
+				Err = errors.Errorf("file name empty")
+				goto exit
+			}
+
+			compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf)
+			if err != nil {
+				Err = err
+				goto exit
+			}
+
+			// Open the new file if it is different from what is already
+			// opened
+			if destFile == nil || destFile.metadata.Name != mf.File.Name {
+				var err error
+				if destFile != nil {
+				cleanup:
+					for {
+						select {
+						case err = <-closeFilesErrors:
+							if err != nil {
+								Err = err
+								goto exit
+							}
+						default:
+							break cleanup
+						}
+					}
+					filesToClose <- destFile
+				}
+				destFile, err = openDestinationFile(dirfd, mf.File, options)
+				if err != nil {
+					Err = err
+					goto exit
+				}
+			}
+
+			if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil {
+				Err = err
+				goto exit
+			}
+			if c.rawReader != nil {
+				if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil {
+					Err = err
+					goto exit
+				}
+			}
+		}
+	exit:
+		if part != nil {
+			part.Close()
+			if Err != nil {
+				break
+			}
+		}
+	}
+
+	if destFile != nil {
+		return destFile.Close()
+	}
+
+	return nil
+}
+
+func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
+	getGap := func(missingParts []missingPart, i int) int {
+		prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
+		return int(missingParts[i].SourceChunk.Offset - prev)
+	}
+	getCost := func(missingParts []missingPart, i int) int {
+		cost := getGap(missingParts, i)
+		if missingParts[i-1].OriginFile != nil {
+			cost += int(missingParts[i-1].SourceChunk.Length)
+		}
+		if missingParts[i].OriginFile != nil {
+			cost += int(missingParts[i].SourceChunk.Length)
+		}
+		return cost
+	}
+
+	// simple case: merge chunks from the same file.
+	newMissingParts := missingParts[0:1]
+	prevIndex := 0
+	for i := 1; i < len(missingParts); i++ {
+		gap := getGap(missingParts, i)
+		if gap == 0 && missingParts[prevIndex].OriginFile == nil &&
+			missingParts[i].OriginFile == nil &&
+			!missingParts[prevIndex].Hole && !missingParts[i].Hole &&
+			len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
+			missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
+			missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+			missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
+			missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
+		} else {
+			newMissingParts = append(newMissingParts, missingParts[i])
+			prevIndex++
+		}
+	}
+	missingParts = newMissingParts
+
+	if len(missingParts) <= target {
+		return missingParts
+	}
+
+	// this implementation doesn't account for duplicates, so it could merge
+	// more than necessary to reach the specified target. Since target itself
+	// is a heuristic value, it doesn't matter.
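+	// Example (illustrative): with parts covering offsets [0,10), [12,20) and
+	// [100,110) and target 2, the 2-byte gap between the first two parts is the
+	// cheapest cost, so they are merged into a single request for [0,20); the
+	// gap bytes become a Gap chunk that is discarded while streaming.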
+ costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) + } + sort.Ints(costs) + + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] + + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) + } else { + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil + if gap > 0 { + gapFile := missingFileChunk{ + Gap: int64(gap), + } + prev.Chunks = append(prev.Chunks, gapFile) + } + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) + } + } + return newMissingParts +} + +func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { + var chunksToRequest []ImageSourceChunk + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } + } + + // There are some missing files. Prepare a multirange request for the missing chunks. + var streams chan io.ReadCloser + var err error + var errs chan error + for { + streams, errs, err = c.stream.GetBlobAt(chunksToRequest) + if err == nil { + break + } + + if _, ok := err.(ErrBadRequest); ok { + requested := len(missingParts) + // If the server cannot handle at least 64 chunks in a single request, just give up. + if requested < 64 { + return err + } + + // Merge more chunks to request + missingParts = mergeMissingChunks(missingParts, requested/2) + continue + } + return err + } + + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { + return err + } + return nil +} + +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) + + parentFd := dirfd + if parent != "." { + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err != nil { + return err + } + defer parentFile.Close() + parentFd = int(parentFile.Fd()) + } + + if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { + if !os.IsExist(err) { + return fmt.Errorf("mkdir %q: %w", name, err) + } + } + + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return err + } + defer file.Close() + + return setFileAttrs(dirfd, file, mode, metadata, options, false) +} + +func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer sourceFile.Close() + + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." 
{ + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) + if err != nil { + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) + if err != nil { + // If the target is a symlink, open the file with O_PATH. + if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) +} + +func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." { + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil +} + +type whiteoutHandler struct { + Dirfd int + Root string +} + +func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil +} + +func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { + dir := filepath.Dir(path) + base := filepath.Base(path) + + dirfd := d.Dirfd + if dir != "" { + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) + if err != nil { + return err + } + defer dir.Close() + + dirfd = int(dir.Fd()) + } + + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil +} + +func checkChownErr(err error, name string, uid, gid int) error { + if errors.Is(err, syscall.EINVAL) { + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally: %w", uid, gid, name, err) + } + return err +} + +func (d whiteoutHandler) Chown(path string, uid, gid int) error { + file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil { + var stat unix.Stat_t + if unix.Fstat(int(file.Fd()), &stat) == nil { + if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) { + return nil + } + } + return checkChownErr(err, path, uid, gid) + } + return nil +} + +type hardLinkToCreate struct { + dest string + dirfd int + mode os.FileMode + metadata *internal.FileMetadata +} + +func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool { + if value, ok := storeOpts.PullOptions[name]; ok { + return strings.ToLower(value) == "true" + } + return def +} + +type findAndCopyFileOptions struct { + useHardLinks bool + enableHostDedup bool + 
ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + if copyOptions.enableHostDedup { + found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + +func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + + bigData := map[string][]byte{ + bigDataKey: c.manifest, + } + output := graphdriver.DriverWithDifferOutput{ + Differ: c, + BigData: bigData, + } + + storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() + if err != nil { + return output, err + } + + if !parseBooleanPullOption(&storeOpts, "enable_partial_images", false) { + return output, errors.New("enable_partial_images not configured") + } + + enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false) + + // When the hard links deduplication is used, file attributes are ignored because setting them + // modifies the source file as well. + useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false) + + // List of OSTree repositories to use for deduplication + ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") + + // Generate the manifest + toc, err := unmarshalToc(c.manifest) + if err != nil { + return output, err + } + + whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + + var missingParts []missingPart + + mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) + if err != nil { + return output, err + } + if err := maybeDoIDRemap(mergedEntries, options); err != nil { + return output, err + } + + if options.ForceMask != nil { + uid, gid, mode, err := archive.GetFileOwner(dest) + if err == nil { + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + return output, err + } + } + } + + dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) + if err != nil { + return output, fmt.Errorf("cannot open %q: %w", dest, err) + } + defer unix.Close(dirfd) + + // hardlinks can point to missing files. 
So create them after all files
+	// are retrieved
+	var hardLinks []hardLinkToCreate
+
+	missingPartsSize, totalChunksSize := int64(0), int64(0)
+
+	copyOptions := findAndCopyFileOptions{
+		useHardLinks:    useHardLinks,
+		enableHostDedup: enableHostDedup,
+		ostreeRepos:     ostreeRepos,
+		options:         options,
+	}
+
+	type copyFileJob struct {
+		njob     int
+		index    int
+		mode     os.FileMode
+		metadata *internal.FileMetadata
+
+		found bool
+		err   error
+	}
+
+	var wg sync.WaitGroup
+
+	copyResults := make([]copyFileJob, len(mergedEntries))
+
+	copyFileJobs := make(chan copyFileJob)
+	defer func() {
+		if copyFileJobs != nil {
+			close(copyFileJobs)
+		}
+		wg.Wait()
+	}()
+
+	for i := 0; i < copyGoRoutines; i++ {
+		wg.Add(1)
+		jobs := copyFileJobs
+
+		go func() {
+			defer wg.Done()
+			for job := range jobs {
+				found, err := c.findAndCopyFile(dirfd, job.metadata, &copyOptions, job.mode)
+				job.err = err
+				job.found = found
+				copyResults[job.njob] = job
+			}
+		}()
+	}
+
+	filesToWaitFor := 0
+	for i, r := range mergedEntries {
+		if options.ForceMask != nil {
+			value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
+			r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
+			r.Mode = int64(*options.ForceMask)
+		}
+
+		mode := os.FileMode(r.Mode)
+
+		r.Name = filepath.Clean(r.Name)
+		r.Linkname = filepath.Clean(r.Linkname)
+
+		t, err := typeToTarType(r.Type)
+		if err != nil {
+			return output, err
+		}
+		if whiteoutConverter != nil {
+			hdr := archivetar.Header{
+				Typeflag: t,
+				Name:     r.Name,
+				Linkname: r.Linkname,
+				Size:     r.Size,
+				Mode:     r.Mode,
+				Uid:      r.UID,
+				Gid:      r.GID,
+			}
+			handler := whiteoutHandler{
+				Dirfd: dirfd,
+				Root:  dest,
+			}
+			writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler)
+			if err != nil {
+				return output, err
+			}
+			if !writeFile {
+				continue
+			}
+		}
+		switch t {
+		case tar.TypeReg:
+			// Create empty files directly.
+			if r.Size == 0 {
+				// Used to have a scope for cleanup.
+				createEmptyFile := func() error {
+					file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0)
+					if err != nil {
+						return err
+					}
+					defer file.Close()
+					if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil {
+						return err
+					}
+					return nil
+				}
+				if err := createEmptyFile(); err != nil {
+					return output, err
+				}
+				continue
+			}
+
+		case tar.TypeDir:
+			if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
+				return output, err
+			}
+			continue
+
+		case tar.TypeLink:
+			dest := dest
+			dirfd := dirfd
+			mode := mode
+			r := r
+			hardLinks = append(hardLinks, hardLinkToCreate{
+				dest:     dest,
+				dirfd:    dirfd,
+				mode:     mode,
+				metadata: &r,
+			})
+			continue
+
+		case tar.TypeSymlink:
+			if err := safeSymlink(dirfd, mode, &r, options); err != nil {
+				return output, err
+			}
+			continue
+
+		case tar.TypeChar:
+		case tar.TypeBlock:
+		case tar.TypeFifo:
+			/* Ignore. */
+		default:
+			return output, fmt.Errorf("invalid type %q", t)
+		}
+
+		totalChunksSize += r.Size
+
+		if t == tar.TypeReg {
+			index := i
+			njob := filesToWaitFor
+			job := copyFileJob{
+				mode:     mode,
+				metadata: &mergedEntries[index],
+				index:    index,
+				njob:     njob,
+			}
+			copyFileJobs <- job
+			filesToWaitFor++
+		}
+	}
+
+	close(copyFileJobs)
+	copyFileJobs = nil
+
+	wg.Wait()
+
+	for _, res := range copyResults[:filesToWaitFor] {
+		if res.err != nil {
+			return output, res.err
+		}
+		// the file was already copied to its destination
+		// so nothing left to do.
+ if res.found { + continue + } + + r := &mergedEntries[res.index] + + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. + for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize + } + remainingSize = remainingSize - size + + rawChunk := ImageSourceChunk{ + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), + } + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, + } + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ + file, + }, + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) + } + } + // There are some missing files. Prepare a multirange request for the missing chunks. + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { + return output, err + } + } + + for _, m := range hardLinks { + if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil { + return output, err + } + } + + if totalChunksSize > 0 { + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) + } + return output, nil +} + +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. 
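+	// i.e. ".prefetch.landmark", ".no.prefetch.landmark" and "stargz.index.json".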
+	case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
+		return true
+	}
+	return false
+}
+
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
+	countNextChunks := func(start int) int {
+		count := 0
+		for _, e := range entries[start:] {
+			if e.Type != TypeChunk {
+				return count
+			}
+			count++
+		}
+		return count
+	}
+
+	size := 0
+	for _, entry := range entries {
+		if mustSkipFile(fileType, entry) {
+			continue
+		}
+		if entry.Type != TypeChunk {
+			size++
+		}
+	}
+
+	mergedEntries := make([]internal.FileMetadata, size)
+	m := 0
+	for i := 0; i < len(entries); i++ {
+		e := entries[i]
+		if mustSkipFile(fileType, e) {
+			continue
+		}
+		if e.Type == TypeChunk {
+			return nil, fmt.Errorf("chunk type without a regular file")
+		}
+
+		if e.Type == TypeReg {
+			nChunks := countNextChunks(i + 1)
+
+			e.Chunks = make([]*internal.FileMetadata, nChunks+1)
+			for j := 0; j <= nChunks; j++ {
+				e.Chunks[j] = &entries[i+j]
+				e.EndOffset = entries[i+j].EndOffset
+			}
+			i += nChunks
+		}
+		mergedEntries[m] = e
+		m++
+	}
+	// stargz/estargz doesn't store EndOffset so let's calculate it here
+	lastOffset := c.tocOffset
+	for i := len(mergedEntries) - 1; i >= 0; i-- {
+		if mergedEntries[i].EndOffset == 0 {
+			mergedEntries[i].EndOffset = lastOffset
+		}
+		if mergedEntries[i].Offset != 0 {
+			lastOffset = mergedEntries[i].Offset
+		}
+
+		lastChunkOffset := mergedEntries[i].EndOffset
+		for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
+			mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
+			mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
+			lastChunkOffset = mergedEntries[i].Chunks[j].Offset
+		}
+	}
+	return mergedEntries, nil
+}
+
+// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
+// same digest as chunk.ChunkDigest
+func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+	parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+	if err != nil {
+		return false
+	}
+	defer unix.Close(parentDirfd)
+
+	fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
+	if err != nil {
+		return false
+	}
+	defer fd.Close()
+
+	if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+		return false
+	}
+
+	r := io.LimitReader(fd, chunk.ChunkSize)
+	digester := digest.Canonical.Digester()
+
+	if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil {
+		return false
+	}
+
+	digest, err := digest.Parse(chunk.ChunkDigest)
+	if err != nil {
+		return false
+	}
+
+	return digester.Digest() == digest
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
new file mode 100644
index 00000000000..3a406ba786a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package chunked
+
+import (
+	"context"
+
+	storage "github.com/containers/storage"
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/pkg/errors"
+)
+
+// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
+func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+	return nil, errors.New("format not supported on this architecture")
+}
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
new file mode 100644
index 00000000000..e6622cf1466
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -0,0 +1,337 @@
+package config
+
+import (
+	"fmt"
+	"os"
+)
+
+// ThinpoolOptionsConfig represents the "storage.options.thinpool"
+// TOML config table.
+type ThinpoolOptionsConfig struct {
+	// AutoExtendPercent determines the amount by which pool needs to be
+	// grown. This is specified in terms of % of pool size. So a value of
+	// 20 means that when threshold is hit, pool will be grown by 20% of
+	// existing pool size.
+	AutoExtendPercent string `toml:"autoextend_percent"`
+
+	// AutoExtendThreshold determines the pool extension threshold in terms
+	// of percentage of pool size. For example, if threshold is 60, that
+	// means when pool is 60% full, threshold has been hit.
+	AutoExtendThreshold string `toml:"autoextend_threshold"`
+
+	// BaseSize specifies the size to use when creating the base device,
+	// which limits the size of images and containers.
+	BaseSize string `toml:"basesize"`
+
+	// BlockSize specifies a custom blocksize to use for the thin pool.
+	BlockSize string `toml:"blocksize"`
+
+	// DirectLvmDevice specifies a custom block storage device to use for
+	// the thin pool.
+	DirectLvmDevice string `toml:"directlvm_device"`
+
+	// DirectLvmDeviceForce wipes the device even if it already has a
+	// filesystem.
+	DirectLvmDeviceForce string `toml:"directlvm_device_force"`
+
+	// Fs specifies the filesystem type to use for the base device.
+	Fs string `toml:"fs"`
+
+	// log_level sets the log level of devicemapper.
+	LogLevel string `toml:"log_level"`
+
+	// MetadataSize specifies the size of the metadata for the thinpool
+	// It will be used with the `pvcreate --metadata` option.
+	MetadataSize string `toml:"metadatasize"`
+
+	// MinFreeSpace specifies the min free space percent in a thin pool
+	// required for new device creation to succeed.
+	MinFreeSpace string `toml:"min_free_space"`
+
+	// MkfsArg specifies extra mkfs arguments to be used when creating the
+	// basedevice.
+	MkfsArg string `toml:"mkfsarg"`
+
+	// MountOpt specifies extra mount options used when mounting the thin
+	// devices.
+	MountOpt string `toml:"mountopt"`
+
+	// Size
+	Size string `toml:"size"`
+
+	// UseDeferredDeletion marks device for deferred deletion
+	UseDeferredDeletion string `toml:"use_deferred_deletion"`
+
+	// UseDeferredRemoval marks device for deferred removal
+	UseDeferredRemoval string `toml:"use_deferred_removal"`
+
+	// XfsNoSpaceMaxRetries specifies the maximum number of
+	// retries XFS should attempt to complete IO when ENOSPC (no space)
+	// error is returned by underlying storage device.
+	XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
+}
+
+type AufsOptionsConfig struct {
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt"`
+}
+
+type BtrfsOptionsConfig struct {
+	// MinSpace is the minimal space allocated to the device
+	MinSpace string `toml:"min_space"`
+	// Size
+	Size string `toml:"size"`
+}
+
+type OverlayOptionsConfig struct {
+	// IgnoreChownErrors is a flag for whether chown errors should be
+	// ignored when building an image.
+	IgnoreChownErrors string `toml:"ignore_chown_errors"`
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt"`
+	// Alternative program to use for the mount of the file system
+	MountProgram string `toml:"mount_program"`
+	// Size
+	Size string `toml:"size"`
+	// Inodes is used to set a maximum inodes of the container image.
+	Inodes string `toml:"inodes"`
+	// Do not create a bind mount on the storage home
+	SkipMountHome string `toml:"skip_mount_home"`
+	// ForceMask indicates the permissions mask (e.g. "0755") to use for new
+	// files and directories
+	ForceMask string `toml:"force_mask"`
+}
+
+type VfsOptionsConfig struct {
+	// IgnoreChownErrors is a flag for whether chown errors should be
+	// ignored when building an image.
+	IgnoreChownErrors string `toml:"ignore_chown_errors"`
+}
+
+type ZfsOptionsConfig struct {
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt"`
+	// Name is the File System name of the ZFS File system
+	Name string `toml:"fsname"`
+	// Size
+	Size string `toml:"size"`
+}
+
+// OptionsConfig represents the "storage.options" TOML config table.
+type OptionsConfig struct {
+	// AdditionalImageStores is the location of additional read/only
+	// Image stores. Usually used to access Networked File System
+	// for shared image content
+	AdditionalImageStores []string `toml:"additionalimagestores"`
+
+	// AdditionalLayerStores is the location of additional read/only
+	// Layer stores. Usually used to access Networked File System
+	// for shared image content
+	// This API is experimental and can be changed without bumping the
+	// major version number.
+	AdditionalLayerStores []string `toml:"additionallayerstores"`
+
+	// Size
+	Size string `toml:"size"`
+
+	// RemapUIDs is a list of default UID mappings to use for layers.
+	RemapUIDs string `toml:"remap-uids"`
+	// RemapGIDs is a list of default GID mappings to use for layers.
+	RemapGIDs string `toml:"remap-gids"`
+	// IgnoreChownErrors is a flag for whether chown errors should be
+	// ignored when building an image.
+	IgnoreChownErrors string `toml:"ignore_chown_errors"`
+
+	// ForceMask indicates the permissions mask (e.g. "0755") to use for new
+	// files and directories.
+	ForceMask os.FileMode `toml:"force_mask"`
+
+	// RemapUser is the name of one or more entries in /etc/subuid which
+	// should be used to set up default UID mappings.
+	RemapUser string `toml:"remap-user"`
+	// RemapGroup is the name of one or more entries in /etc/subgid which
+	// should be used to set up default GID mappings.
+	RemapGroup string `toml:"remap-group"`
+
+	// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
+	// /etc/subgid which should be used to set up automatically a userns.
+	RootAutoUsernsUser string `toml:"root-auto-userns-user"`
+
+	// AutoUsernsMinSize is the minimum size for a user namespace that is
+	// created automatically.
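+	// (the size of a namespace is the number of host IDs mapped into it)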
+ AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"` + + // AutoUsernsMaxSize is the maximum size for a user namespace that is + // created automatically. + AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"` + + // Aufs container options to be handed to aufs drivers + Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + + // Btrfs container options to be handed to btrfs drivers + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + + // Thinpool container options to be handed to thinpool drivers + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + + // Overlay container options to be handed to overlay drivers + Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + + // Vfs container options to be handed to VFS drivers + Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + + // Zfs container options to be handed to ZFS drivers + Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + + // Do not create a bind mount on the storage home + SkipMountHome string `toml:"skip_mount_home"` + + // Alternative program to use for the mount of the file system + MountProgram string `toml:"mount_program"` + + // MountOpt specifies extra mount options used when mounting + MountOpt string `toml:"mountopt"` + + // PullOptions specifies options to be handed to pull managers + // This API is experimental and can be changed without bumping the major version number. + PullOptions map[string]string `toml:"pull_options"` + + // DisableVolatile doesn't allow volatile mounts when it is set. + DisableVolatile bool `toml:"disable-volatile"` +} + +// GetGraphDriverOptions returns the driver specific options +func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { + var doptions []string + switch driverName { + case "aufs": + if options.Aufs.MountOpt != "" { + return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + case "btrfs": + if options.Btrfs.MinSpace != "" { + return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace)) + } + if options.Btrfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + case "devicemapper": + if options.Thinpool.AutoExtendPercent != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent)) + } + if options.Thinpool.AutoExtendThreshold != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold)) + } + if options.Thinpool.BaseSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize)) + } + if options.Thinpool.BlockSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize)) + } + if options.Thinpool.DirectLvmDevice != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice)) + } + if options.Thinpool.DirectLvmDeviceForce != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce)) + } + if options.Thinpool.Fs != "" { + doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs)) + } + if options.Thinpool.LogLevel != "" { + doptions = append(doptions, 
fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) + } + if options.Thinpool.MetadataSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize)) + } + if options.Thinpool.MinFreeSpace != "" { + doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) + } + if options.Thinpool.MkfsArg != "" { + doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg)) + } + if options.Thinpool.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + if options.Thinpool.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + if options.Thinpool.UseDeferredDeletion != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion)) + } + if options.Thinpool.UseDeferredRemoval != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval)) + } + if options.Thinpool.XfsNoSpaceMaxRetries != "" { + doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries)) + } + + case "overlay", "overlay2": + if options.Overlay.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + if options.Overlay.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram)) + } else if options.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram)) + } + if options.Overlay.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Overlay.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + if options.Overlay.Inodes != "" { + doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes)) + } + if options.Overlay.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) + } else if options.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome)) + } + if options.Overlay.ForceMask != "" { + doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask)) + } else if options.ForceMask != 0 { + doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask)) + } + case "vfs": + if options.Vfs.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, 
options.Vfs.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + + case "zfs": + if options.Zfs.Name != "" { + doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name)) + } + if options.Zfs.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Zfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + } + return doptions +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go new file mode 100644 index 00000000000..6a0ac246479 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -0,0 +1,821 @@ +// +build linux,cgo + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Same as DM_DEVICE_* enum values from libdevmapper.h +// nolint: deadcode +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl + // command to execute. 
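+	// A Task wraps a *C.struct_dm_task; TaskCreate installs a runtime
+	// finalizer so the underlying C allocation is released via destroy()
+	// once the Task becomes unreachable.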
+ Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layer) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. +func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + runtime.KeepAlive(t) + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res 
:= DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. +func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) + return ErrUdevWait + } + return nil +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(cookie) + + dmSawBusy = false // reset before the task is run + dmSawEnxio = false + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. 
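+// It marks the device for deferred removal, so the kernel deletes it once its
+// last user closes it, instead of the removal failing immediately while the
+// device is still open.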
+func RemoveDeviceDeferred(name string) error {
+	logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
+	defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
+	task, err := TaskCreateNamed(deviceRemove, name)
+	if task == nil {
+		return err
+	}
+
+	if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+		return ErrTaskDeferredRemove
+	}
+
+	// Set a task cookie and disable library fallback, or else libdevmapper will
+	// disable udev dm rules and delete the symlink under /dev/mapper by itself,
+	// even if the removal is deferred by the kernel.
+	cookie := new(uint)
+	var flags uint16
+	flags = DmUdevDisableLibraryFallback
+	if err := task.setCookie(cookie, flags); err != nil {
+		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+	}
+
+	// libdevmapper and udev rely on System V semaphores for synchronization;
+	// semaphores created in `task.setCookie` are cleaned up in `UdevWait`.
+	// These two calls must therefore come in pairs, otherwise semaphores are
+	// leaked until the limit defined in `/proc/sys/kernel/sem` is reached,
+	// which eventually makes all following calls to `task.setCookie` fail.
+	// This deferred call does not wait for the deferred removal to actually
+	// finish: no udev event is generated, so udev never increments the
+	// semaphore's value; UdevWait here merely cleans the semaphore up.
+	defer UdevWait(cookie)
+
+	dmSawEnxio = false
+	if err = task.run(); err != nil {
+		if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
+	}
+
+	return nil
+}
+
+// CancelDeferredRemove cancels a deferred remove for a device.
+func CancelDeferredRemove(deviceName string) error {
+	task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+	if task == nil {
+		return err
+	}
+
+	if err := task.setSector(0); err != nil {
+		return fmt.Errorf("devicemapper: Can't set sector %s", err)
+	}
+
+	if err := task.setMessage("@cancel_deferred_remove"); err != nil {
+		return fmt.Errorf("devicemapper: Can't set message %s", err)
+	}
+
+	dmSawBusy = false
+	dmSawEnxio = false
+	if err := task.run(); err != nil {
+		// The device might already be being deleted.
+		if dmSawBusy {
+			return ErrBusy
+		} else if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+	}
+	return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+	size, err := ioctlBlkGetSize64(file.Fd())
+	if err != nil {
+		logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+		return 0, ErrGetBlockSize
+	}
+	return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on
+// the thin pool when we remove a thinp device, so we do it manually.
+func BlockDeviceDiscard(path string) error {
+	file, err := os.OpenFile(path, os.O_RDWR, 0)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	size, err := GetBlockDeviceSize(file)
+	if err != nil {
+		return err
+	}
+
+	if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+		return err
+	}
+
+	// Without this the remove of the device that happens after the discard
+	// sometimes fails with EBUSY.
+	unix.Sync()
+
+	return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
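+// Given the thin-pool table assembled below, the equivalent CLI invocation is
+// roughly:
+//   dmsetup create <poolName> --table \
+//     "0 <dataSectors> thin-pool <metadataFile> <dataFile> <poolBlockSize> 32768 1 skip_block_zeroing"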
+// It creates a device with the specified poolName, data and metadata file and block size. +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + cookie := new(uint) + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". +// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". 
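+// (Implemented as a deviceStatus task whose first target is read back via
+// getNextTarget.)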
+// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. +func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. 
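+// (It does so by sending a "create_thin <deviceID>" message to sector 0 of the
+// pool, the programmatic counterpart of "dmsetup message <poolName> 0
+// create_thin <deviceID>".)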
+func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. +func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. 
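+// (CreateSnapDevice below wraps this call with the suspend/resume of the
+// origin device when the origin is active.)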
+func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error {
+	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+	if task == nil {
+		return err
+	}
+
+	if err := task.setSector(0); err != nil {
+		return fmt.Errorf("devicemapper: Can't set sector %s", err)
+	}
+
+	if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
+		return fmt.Errorf("devicemapper: Can't set message %s", err)
+	}
+
+	dmSawExist = false // reset before the task is run
+	if err := task.run(); err != nil {
+		// The caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
+		if dmSawExist {
+			return ErrDeviceIDExists
+		}
+		return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
+	}
+
+	return nil
+}
+
+// CreateSnapDevice creates a snapshot based on the device identified by baseName and baseDeviceID.
+func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
+	devinfo, _ := GetInfo(baseName)
+	doSuspend := devinfo != nil && devinfo.Exists != 0
+
+	if doSuspend {
+		if err := SuspendDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
+		if doSuspend {
+			if err2 := ResumeDevice(baseName); err2 != nil {
+				return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+			}
+		}
+		return err
+	}
+
+	if doSuspend {
+		if err := ResumeDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 00000000000..082fb1ba329
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,121 @@
+// +build linux,cgo
+
+package devicemapper
+
+import "C"
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// DevmapperLogger defines methods required to register as a callback for
+// logging events received from devicemapper. Note that devicemapper will send
+// *all* logs (including debug logs) to callbacks, so it's recommended not to
+// spam the console with the output.
+type DevmapperLogger interface {
+	// DMLog is the logging callback containing all of the information from
+	// devicemapper. The interface is identical to the C libdm counterpart.
+	DMLog(level int, file string, line int, dmError int, message string)
+}
+
+// dmLogger is the current logger in use that is being forwarded our messages.
+var dmLogger DevmapperLogger
+
+// LogInit changes the logging callback called after processing libdm logs for
+// error message information. The default logger simply forwards all logs to
+// logrus. Calling LogInit(nil) disables the calling of callbacks.
+func LogInit(logger DevmapperLogger) {
+	dmLogger = logger
+}
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export".
+
+// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
+// because we are using callbacks, this function will be called for *every* log
+// in libdm (even debug ones, because there's no way of setting the verbosity
+// level for an external logging callback).
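+// Besides forwarding to dmLogger, the callback also sets the package-level
+// dmSawBusy/dmSawExist/dmSawEnxio flags by substring-matching the message
+// text, since libdm itself only reports a 0/1 status.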
+//export StorageDevmapperLogCallback
+func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
+	msg := C.GoString(message)
+
+	// Track what errno libdm saw, because the library only gives us 0 or 1.
+	if level < LogLevelDebug {
+		if strings.Contains(msg, "busy") {
+			dmSawBusy = true
+		}
+
+		if strings.Contains(msg, "File exists") {
+			dmSawExist = true
+		}
+
+		if strings.Contains(msg, "No such device or address") {
+			dmSawEnxio = true
+		}
+	}
+
+	if dmLogger != nil {
+		dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
+	}
+}
+
+// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
+// all logs that are of higher or equal priority to the given level to the
+// corresponding logrus level.
+type DefaultLogger struct {
+	// Level corresponds to the highest libdm level that will be forwarded to
+	// logrus. In order to change this, register a new DefaultLogger.
+	Level int
+}
+
+// DMLog is the logging callback containing all of the information from
+// devicemapper. The interface is identical to the C libdm counterpart.
+func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
+	if level <= l.Level {
+		// Forward the log to the correct logrus level, as allowed by l.Level.
+		logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+		switch level {
+		case LogLevelFatal, LogLevelErr:
+			logrus.Error(logMsg)
+		case LogLevelWarn:
+			logrus.Warn(logMsg)
+		case LogLevelNotice, LogLevelInfo:
+			logrus.Info(logMsg)
+		case LogLevelDebug:
+			logrus.Debug(logMsg)
+		default:
+			// Don't drop any "unknown" levels.
+			logrus.Info(logMsg)
+		}
+	}
+}
+
+// registerLogCallback registers our own logging callback function for libdm
+// (which is StorageDevmapperLogCallback).
+//
+// Because libdm only gives us {0,1} error codes we need to parse the logs
+// produced by libdm (to set dmSawBusy and so on). Note that by registering a
+// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
+// stderr so we have to log everything ourselves. None of this handling is
+// optional because we depend on log callbacks to parse the logs, and if we
+// don't forward the log information we'll be in a lot of trouble when
+// debugging things.
+func registerLogCallback() {
+	LogWithErrnoInit()
+}
+
+func init() {
+	// Use the default logger by default. We only allow LogLevelFatal by
+	// default, because internally we mask a lot of libdm errors by retrying
+	// and similar tricks. Also, libdm is very chatty and we don't want to
+	// worry users for no reason.
+	dmLogger = DefaultLogger{
+		Level: LogLevelFatal,
+	}
+
+	// Register as early as possible so we don't miss anything.
+	registerLogCallback()
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
new file mode 100644
index 00000000000..190d83d4999
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
@@ -0,0 +1,252 @@
+// +build linux,cgo
+
+package devicemapper
+
+/*
+#define _GNU_SOURCE
+#include <libdevmapper.h>
+#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char *buffer = NULL; + va_list ap; + int ret; + + va_start(ap, f); + ret = vasprintf(&buffer, f, ap); + va_end(ap); + if (ret < 0) { + // memory allocation failed -- should never happen? + return; + } + + StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + free(buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, 
ttype, params string) int {
+
+	Cttype := C.CString(ttype)
+	defer free(Cttype)
+
+	Cparams := C.CString(params)
+	defer free(Cparams)
+
+	return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
+}
+
+func dmTaskGetDepsFct(task *cdmTask) *Deps {
+	Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task))
+	if Cdeps == nil {
+		return nil
+	}
+
+	// golang issue: https://github.com/golang/go/issues/11925
+	hdr := reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
+		Len:  int(Cdeps.count),
+		Cap:  int(Cdeps.count),
+	}
+	devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
+
+	deps := &Deps{
+		Count:  uint32(Cdeps.count),
+		Filler: uint32(Cdeps.filler),
+	}
+	for _, device := range devices {
+		deps.Device = append(deps.Device, uint64(device))
+	}
+	return deps
+}
+
+func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
+	Cinfo := C.struct_dm_info{}
+	defer func() {
+		info.Exists = int(Cinfo.exists)
+		info.Suspended = int(Cinfo.suspended)
+		info.LiveTable = int(Cinfo.live_table)
+		info.InactiveTable = int(Cinfo.inactive_table)
+		info.OpenCount = int32(Cinfo.open_count)
+		info.EventNr = uint32(Cinfo.event_nr)
+		info.Major = uint32(Cinfo.major)
+		info.Minor = uint32(Cinfo.minor)
+		info.ReadOnly = int(Cinfo.read_only)
+		info.TargetCount = int32(Cinfo.target_count)
+	}()
+	return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
+
+func dmTaskGetDriverVersionFct(task *cdmTask) string {
+	buffer := C.malloc(128)
+	defer C.free(buffer)
+	res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
+	if res == 0 {
+		return ""
+	}
+	return C.GoString((*C.char)(buffer))
+}
+
+func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
+	var (
+		Cstart, Clength      C.uint64_t
+		CtargetType, Cparams *C.char
+	)
+	defer func() {
+		*start = uint64(Cstart)
+		*length = uint64(Clength)
+		*target = C.GoString(CtargetType)
+		*params = C.GoString(Cparams)
+	}()
+
+	nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
+	return nextp
+}
+
+func dmUdevSetSyncSupportFct(syncWithUdev int) {
+	(C.dm_udev_set_sync_support(C.int(syncWithUdev)))
+}
+
+func dmUdevGetSyncSupportFct() int {
+	return int(C.dm_udev_get_sync_support())
+}
+
+func dmUdevWaitFct(cookie uint) int {
+	return int(C.dm_udev_wait(C.uint32_t(cookie)))
+}
+
+func dmCookieSupportedFct() int {
+	return int(C.dm_cookie_supported())
+}
+
+func logWithErrnoInitFct() {
+	C.log_with_errno_init()
+}
+
+func dmSetDevDirFct(dir string) int {
+	Cdir := C.CString(dir)
+	defer free(Cdir)
+
+	return int(C.dm_set_dev_dir(Cdir))
+}
+
+func dmGetLibraryVersionFct(version *string) int {
+	buffer := C.CString(string(make([]byte, 128)))
+	defer free(buffer)
+	defer func() {
+		*version = C.GoString(buffer)
+	}()
+	return int(C.dm_get_library_version(buffer, 128))
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 00000000000..7f793c27086
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,31 @@
+// +build linux,cgo,!libdm_no_deferred_remove
+
+package devicemapper
+
+// #include <libdevmapper.h>
+import "C"
+
+// LibraryDeferredRemovalSupport tells if the feature is enabled in the
build +const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go new file mode 100644 index 00000000000..7d84508982d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build + +package devicemapper + +// #cgo pkg-config: devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 00000000000..a880fec8c49 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,cgo,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. 
+ return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go new file mode 100644 index 00000000000..cf7f26a4c67 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go @@ -0,0 +1,6 @@ +// +build linux,cgo,static_build + +package devicemapper + +// #cgo pkg-config: --static devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go new file mode 100644 index 00000000000..50ea7c48238 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -0,0 +1,28 @@ +// +build linux,cgo + +package devicemapper + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go new file mode 100644 index 00000000000..cee5e545498 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go new file mode 100644 index 00000000000..b0ce706e5ed --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -0,0 +1,32 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// DiskUsage is a structure that describes the disk usage (size and inode count) +// of a particular directory. +type DiskUsage struct { + Size int64 + InodeCount int64 +} + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go new file mode 100644 index 00000000000..8d58d24cac8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -0,0 +1,60 @@ +// +build linux darwin freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. 
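+// A hypothetical call site:
+//
+//	sz, err := directory.Size("/var/lib/containers/storage")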
+func Size(dir string) (size int64, err error) {
+	usage, err := Usage(dir)
+	if err != nil {
+		return 0, err
+	}
+	return usage.Size, nil
+}
+
+// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
+func Usage(dir string) (usage *DiskUsage, err error) {
+	usage = &DiskUsage{}
+	data := make(map[uint64]struct{})
+	err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+		if err != nil {
+			// if dir does not exist, Usage() returns the error.
+			// if dir/x disappeared while walking, Usage() ignores dir/x.
+			if os.IsNotExist(err) && d != dir {
+				return nil
+			}
+			return err
+		}
+
+		if fileInfo == nil {
+			return nil
+		}
+
+		// Check the inode to only count the sizes of files with multiple hard links once.
+		// The inode is not a uint64 on all platforms, so cast it to avoid issues.
+		inode := fileInfo.Sys().(*syscall.Stat_t).Ino
+		if _, exists := data[uint64(inode)]; exists {
+			return nil
+		}
+		data[uint64(inode)] = struct{}{}
+
+		// Ignore directory sizes
+		if fileInfo.IsDir() {
+			return nil
+		}
+
+		usage.Size += fileInfo.Size()
+
+		return nil
+	})
+	// The inode count is the number of unique inode numbers we saw.
+	usage.InodeCount = int64(len(data))
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
new file mode 100644
index 00000000000..a7a81240bc2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -0,0 +1,49 @@
+// +build windows
+
+package directory
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// Size walks a directory tree and returns its total size in bytes
+func Size(dir string) (size int64, err error) {
+	usage, err := Usage(dir)
+	if err != nil {
+		return 0, err
+	}
+	return usage.Size, nil
+}
+
+// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
+func Usage(dir string) (usage *DiskUsage, err error) {
+	usage = &DiskUsage{}
+	err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+		if err != nil {
+			// if dir does not exist, Usage() returns the error.
+			// if dir/x disappeared while walking, Usage() ignores dir/x.
+ if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + usage.InodeCount++ + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + usage.Size += s + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go new file mode 100644 index 00000000000..7df7f3d4364 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go @@ -0,0 +1,20 @@ +// +build linux + +package dmesg + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Dmesg returns last messages from the kernel log, up to size bytes +func Dmesg(size int) []byte { + t := uintptr(3) // SYSLOG_ACTION_READ_ALL + b := make([]byte, size) + amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if err != 0 { + return []byte{} + } + return b[:amt] +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 00000000000..5be98165ef7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,372 @@ +package fileutils + +import ( + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = strings.TrimPrefix(filepath.Clean(p[1:]), "/") + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Deprecated: Please use the `MatchesResult` method instead. +// Matches matches path against all the patterns. 
Matches is not safe to be
+// called concurrently.
+func (pm *PatternMatcher) Matches(file string) (bool, error) {
+	matched := false
+	file = filepath.FromSlash(file)
+
+	for _, pattern := range pm.patterns {
+		negative := false
+
+		if pattern.exclusion {
+			negative = true
+		}
+
+		match, err := pattern.match(file)
+		if err != nil {
+			return false, err
+		}
+
+		if match {
+			matched = !negative
+		}
+	}
+
+	if matched {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return matched, nil
+}
+
+type MatchResult struct {
+	isMatched         bool
+	matches, excludes uint
+}
+
+// IsMatched returns true if the overall result is matched.
+func (m *MatchResult) IsMatched() bool {
+	return m.isMatched
+}
+
+// Matches returns the number of matches of a MatchResult.
+func (m *MatchResult) Matches() uint {
+	return m.matches
+}
+
+// Excludes returns the number of excludes of a MatchResult.
+func (m *MatchResult) Excludes() uint {
+	return m.excludes
+}
+
+// MatchesResult verifies the provided filepath against all patterns.
+// It returns the `*MatchResult` result for the patterns on success, otherwise
+// an error. This method is not safe to be called concurrently.
+func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
+	file = filepath.FromSlash(file)
+	res = &MatchResult{false, 0, 0}
+
+	for _, pattern := range pm.patterns {
+		negative := false
+
+		if pattern.exclusion {
+			negative = true
+		}
+
+		match, err := pattern.match(file)
+		if err != nil {
+			return nil, err
+		}
+
+		if match {
+			res.isMatched = !negative
+			if negative {
+				res.excludes++
+			} else {
+				res.matches++
+			}
+		}
+	}
+
+	if res.matches > 0 {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return res, nil
+}
+
+// IsMatch verifies the provided filepath against all patterns and returns true
+// if it matches. A match is valid if the last match is a positive one.
+// It returns an error on failure and is not safe to be called concurrently.
+func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) {
+	res, err := pm.MatchesResult(file)
+	if err != nil {
+		return false, err
+	}
+	return res.isMatched, nil
+}
+
+// Exclusions returns true if any of the patterns define exclusions.
+func (pm *PatternMatcher) Exclusions() bool {
+	return pm.exclusions
+}
+
+// Patterns returns the array of active patterns.
+func (pm *PatternMatcher) Patterns() []*Pattern {
+	return pm.patterns
+}
+
+// Pattern defines a single regexp used to filter file paths.
+type Pattern struct {
+	cleanedPattern string
+	dirs           []string
+	regexp         *regexp.Regexp
+	exclusion      bool
+}
+
+func (p *Pattern) String() string {
+	return p.cleanedPattern
+}
+
+// Exclusion returns true if this pattern defines exclusion.
+func (p *Pattern) Exclusion() bool {
+	return p.exclusion
+}
+
+func (p *Pattern) match(path string) (bool, error) {
+	if p.regexp == nil {
+		if err := p.compile(); err != nil {
+			return false, filepath.ErrBadPattern
+		}
+	}
+
+	b := p.regexp.MatchString(path)
+
+	return b, nil
+}
+
+func (p *Pattern) compile() error {
+	regStr := "^"
+	pattern := p.cleanedPattern
+	// Go through the pattern and convert it to a regexp.
+	// We use a scanner so we can support utf-8 chars.
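+	// As a worked example (not exercised by this code): with a Unix path
+	// separator, the pattern "docs/**/*.md" compiles to
+	// ^docs/(.*/)?[^/]*\.md(/.*)?$.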
+ var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + const bs = `\` + if sl == bs { + escSL += bs + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += bs + string(ch) + } else if ch == '\\' { + // escape next char. + if sl == bs { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += bs + string(scan.Next()) + } else { + return filepath.ErrBadPattern + } + } else { + regStr += string(ch) + } + } + + regStr += "(" + escSL + ".*)?$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.IsMatch(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// ReadSymlinkedPath returns the target directory of a symlink. +// The target of the symbolic link can be a file and a directory. 
+func ReadSymlinkedPath(path string) (realPath string, err error) { + if realPath, err = filepath.Abs(path); err != nil { + return "", errors.Wrapf(err, "unable to get absolute path for %q", path) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", errors.Wrapf(err, "failed to canonicalise path for %q", path) + } + if _, err := os.Stat(realPath); err != nil { + return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 00000000000..ccd648fac30 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 00000000000..0f2cb7ab933 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000000..92056c1d5f6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("%v", err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000000..5ec21cace52 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. 
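+// It always returns -1, so callers treat the count as unknown.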
+func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go new file mode 100644 index 00000000000..e6094b55b71 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,88 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *unix.Dirent) bool { + visited++ + if ent.Type == unix.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := unix.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000000..85c5e76c844 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. 
+// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetCacheHome() (string, error) {
+	if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" {
+		return xdgCacheHome, nil
+	}
+	home := Get()
+	if home == "" {
+		return "", errors.New("could not get either XDG_CACHE_HOME or HOME")
+	}
+	return filepath.Join(home, ".cache"), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
new file mode 100644
index 00000000000..027db259c19
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -0,0 +1,20 @@
+// +build !linux,!darwin,!freebsd
+
+package homedir
+
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
+import (
+	"errors"
+)
+
+// GetRuntimeDir is unsupported on non-Linux systems.
+func GetRuntimeDir() (string, error) {
+	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
+}
+
+// StickRuntimeDirContents is unsupported on non-Linux systems.
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
new file mode 100644
index 00000000000..33177bdf306
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -0,0 +1,95 @@
+// +build !windows
+
+package homedir
+
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/storage/pkg/unshare"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+	return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+//
+// If linking statically with cgo enabled against glibc, ensure the
+// osusergo build tag is used.
+//
+// If needing to do nss lookups, do not disable cgo or set osusergo.
+func Get() string {
+	homedir, _ := unshare.HomeDir()
+	return homedir
+}
+
+// GetShortcutString returns the string that is shortcut to user's home directory
+// in the native shell of the platform running on.
+func GetShortcutString() string {
+	return "~"
+}
+
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns a non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+	if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+		return xdgRuntimeDir, nil
+	}
+	return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickRuntimeDirContents returns the slice of files whose sticky bit was set.
+// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	runtimeDir, err := GetRuntimeDir()
+	if err != nil {
+		// ignore error if runtimeDir is empty
+		return nil, nil
+	}
+	runtimeDir, err = filepath.Abs(runtimeDir)
+	if err != nil {
+		return nil, err
+	}
+	var sticked []string
+	for _, f := range files {
+		f, err = filepath.Abs(f)
+		if err != nil {
+			return sticked, err
+		}
+		if strings.HasPrefix(f, runtimeDir+"/") {
+			if err = stick(f); err != nil {
+				return sticked, err
+			}
+			sticked = append(sticked, f)
+		}
+	}
+	return sticked, nil
+}
+
+func stick(f string) error {
+	st, err := os.Stat(f)
+	if err != nil {
+		return err
+	}
+	m := st.Mode()
+	m |= os.ModeSticky
+	return os.Chmod(f, m)
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
new file mode 100644
index 00000000000..af65f2c03de
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -0,0 +1,32 @@
+package homedir
+
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
+import (
+	"os"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+	return "USERPROFILE"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+	home := os.Getenv(Key())
+	if home != "" {
+		return home
+	}
+	home, _ = os.UserHomeDir()
+	return home
+}
+
+// GetShortcutString returns the string that is shortcut to user's home directory
+// in the native shell of the platform running on.
+func GetShortcutString() string {
+	return "%USERPROFILE%" // be careful while using in format functions
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
new file mode 100644
index 00000000000..0abe886eb28
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -0,0 +1,358 @@
+package idtools
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"os/user"
+	"sort"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/containers/storage/pkg/system"
+	"github.com/pkg/errors"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+	ContainerID int `json:"container_id"`
+	HostID      int `json:"host_id"`
+	Size        int `json:"size"`
+}
+
+type subIDRange struct {
+	Start  int
+	Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int           { return len(e) }
+func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+	subuidFileName string = "/etc/subuid"
+	subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+// Deprecated: Use MkdirAllAndChown
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+// Deprecated: Use MkdirAndChown with an IDPair
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// MkdirAllAndChown creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
+	return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
+	return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
+	return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+	var uid, gid int
+	var err error
+	if len(uidMap) == 1 && uidMap[0].Size == 1 {
+		uid = uidMap[0].HostID
+	} else {
+		uid, err = RawToHost(0, uidMap)
+		if err != nil {
+			return -1, -1, err
+		}
+	}
+	if len(gidMap) == 1 && gidMap[0].Size == 1 {
+		gid = gidMap[0].HostID
+	} else {
+		gid, err = RawToHost(0, gidMap)
+		if err != nil {
+			return -1, -1, err
+		}
+	}
+	return uid, gid, nil
+}
+
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return hostID, nil
+	}
+	for _, m := range idMap {
+		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+			contID := m.ContainerID + (hostID - m.HostID)
+			return contID, nil
+		}
+	}
+	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+			hostID := m.HostID + (contID - m.ContainerID)
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+	UID int
+	GID int
+}
+
+// IDMappings contains the mappings of UIDs and GIDs
+type IDMappings struct {
+	uids []IDMap
+	gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+	subuidRanges, err := readSubuid(username)
+	if err != nil {
+		return nil, err
+	}
+	subgidRanges, err := readSubgid(groupname)
+	if err != nil {
+		return nil, err
+	}
+	if len(subuidRanges) == 0 {
+		return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName)
+	}
+	if len(subgidRanges) == 0 {
+		return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName)
+	}
+
+	return &IDMappings{
+		uids: createIDMap(subuidRanges),
+		gids: createIDMap(subgidRanges),
+	}, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+	return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+	uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+	return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+	var err error
+	var target IDPair
+
+	target.UID, err = RawToHost(pair.UID, i.uids)
+	if err != nil {
+		return target, err
+	}
+
+	target.GID, err = RawToHost(pair.GID, i.gids)
+	return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+	uid, err := RawToContainer(pair.UID, i.uids)
+	if err != nil {
+		return -1, -1, err
+	}
+	gid, err := RawToContainer(pair.GID, i.gids)
+	return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+	return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs return the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+	return i.uids
+}
+
+// GIDs return the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+	return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+	idMap := []IDMap{}
+
+	// sort the ranges by lowest ID first
+	sort.Sort(subidRanges)
+	containerID := 0
+	for _, idrange := range subidRanges {
+		idMap = append(idMap, IDMap{
+			ContainerID: containerID,
+			HostID:      idrange.Start,
+			Size:        idrange.Length,
+		})
+		containerID = containerID + idrange.Length
+	}
+	return idMap
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+	var (
+		rangeList ranges
+		uidstr    string
+	)
+	if u, err := user.Lookup(username); err == nil {
+		uidstr = u.Uid
+	}
+
+	subidFile, err := os.Open(path)
+	if err != nil {
+		return rangeList, err
+	}
+	defer subidFile.Close()
+
+	s := bufio.NewScanner(subidFile)
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return rangeList, err
+		}
+
+		text := strings.TrimSpace(s.Text())
+		if text == "" || strings.HasPrefix(text, "#") {
+			continue
+		}
+		parts := strings.Split(text, ":")
+		if len(parts) != 3 {
+			return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+		}
+		if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
+			startid, err := strconv.Atoi(parts[1])
+			if err != nil {
+				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+			}
+			length, err := strconv.Atoi(parts[2])
+			if err != nil {
+				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+			}
+			rangeList = append(rangeList, subIDRange{startid, length})
+		}
+	}
+	return rangeList, nil
+}
+
+func checkChownErr(err error, name string, uid, gid int) error {
+	if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
+		return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally", uid, gid, name)
+	}
+	return err
+}
+
+func SafeChown(name string, uid, gid int) error {
+	if stat, statErr := system.Stat(name); statErr == nil {
+		if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+			return nil
+		}
+	}
+	return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
+}
+
+func SafeLchown(name string, uid, gid int) error {
+	if stat, statErr := system.Lstat(name); statErr == nil {
+		if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+			return nil
+		}
+	}
+	return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
+}
+
+type sortByHostID []IDMap
+
+func (e sortByHostID) Len() int           { return len(e) }
+func (e sortByHostID) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e sortByHostID) Less(i, j int) bool { return e[i].HostID < e[j].HostID }
+
+type sortByContainerID []IDMap
+
+func (e sortByContainerID) Len() int           { return len(e) }
+func (e sortByContainerID) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e sortByContainerID) Less(i, j int) bool { return e[i].ContainerID < e[j].ContainerID }
+
+// IsContiguous checks if the specified mapping is contiguous and doesn't
+// have any hole.
+func IsContiguous(mappings []IDMap) bool {
+	if len(mappings) < 2 {
+		return true
+	}
+
+	var mh sortByHostID = mappings[:]
+	sort.Sort(mh)
+	for i := 1; i < len(mh); i++ {
+		if mh[i].HostID != mh[i-1].HostID+mh[i-1].Size {
+			return false
+		}
+	}
+
+	var mc sortByContainerID = mappings[:]
+	sort.Sort(mc)
+	for i := 1; i < len(mc); i++ {
+		if mc[i].ContainerID != mc[i-1].ContainerID+mc[i-1].Size {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
new file mode 100644
index 00000000000..6e6e3b22bc9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -0,0 +1,71 @@
+// +build linux,cgo,libsubid
+
+package idtools
+
+import (
+	"unsafe"
+
+	"github.com/pkg/errors"
+)
+
+/*
+#cgo LDFLAGS: -l subid
+#include <shadow/subid.h>
+#include <stdlib.h>
+#include <stdio.h>
+const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
+
+struct subid_range get_range(struct subid_range *ranges, int i)
+{
+	shadow_logfd = stderr;
+	return ranges[i];
+}
+
+#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
+# define subid_get_uid_ranges get_subuid_ranges
+# define subid_get_gid_ranges get_subgid_ranges
+#endif
+
+*/
+import "C"
+
+func readSubid(username string, isUser bool) (ranges, error) {
+	var ret ranges
+	if username == "ALL" {
+		return nil, errors.New("username ALL not supported")
+	}
+
+	cUsername := C.CString(username)
+	defer C.free(unsafe.Pointer(cUsername))
+
+	var nRanges C.int
+	var cRanges *C.struct_subid_range
+	if isUser {
+		nRanges = C.subid_get_uid_ranges(cUsername, &cRanges)
+	} else {
+		nRanges = C.subid_get_gid_ranges(cUsername, &cRanges)
+	}
+	if nRanges < 0 {
+		return nil, errors.New("cannot read subids")
+	}
+	defer C.free(unsafe.Pointer(cRanges))
+
+	for i := 0; i < int(nRanges); i++ {
+		r := C.get_range(cRanges, C.int(i))
+		newRange := subIDRange{
+			Start:  int(r.start),
+			Length: int(r.count),
+		}
+		ret = append(ret, newRange)
+	}
+	return ret, nil
+}
+
+func readSubuid(username string) (ranges, error) {
+	return readSubid(username, true)
+}
+
+func readSubgid(username string) (ranges, error) {
+	return readSubid(username, false)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
new file mode 100644
index 00000000000..7f270c61f82
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -0,0 +1,213 @@
+// +build !windows
+
+package idtools
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+
+
"github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + st, err := os.Stat(path) + if err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil { + if !st.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return SafeChown(path, ownerUID, ownerGID) + } + // nothing to do; directory exists and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + if !filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := os.MkdirAll(path, mode); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing 
capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go new file mode 100644 index 00000000000..84da1b764bf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux !libsubid !cgo + +package idtools + +func readSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func readSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} diff --git 
a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000000..16be94f446f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package idtools + +import ( + "os" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := os.MkdirAll(path, mode); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go new file mode 100644 index 00000000000..1c819a1f971 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -0,0 +1,59 @@ +package idtools + +import ( + "fmt" + "math" + "math/bits" + "strconv" + "strings" +) + +func parseTriple(spec []string) (container, host, size uint32, err error) { + cid, err := strconv.ParseUint(spec[0], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + } + hid, err := strconv.ParseUint(spec[1], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + } + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + } + return uint32(cid), uint32(hid), uint32(sz), nil +} + +// ParseIDMap parses idmap triples from string. 
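+// Each element of mapSpec holds one or more colon-separated
+// container-id:host-id:size triples; empty elements are skipped.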
+func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { + stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) + for _, idMapSpec := range mapSpec { + if idMapSpec == "" { + continue + } + idSpec := strings.Split(idMapSpec, ":") + if len(idSpec)%3 != 0 { + return nil, stdErr + } + for i := range idSpec { + if i%3 != 0 { + continue + } + cid, hid, size, err := parseTriple(idSpec[i : i+3]) + if err != nil { + return nil, stdErr + } + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { + return nil, stdErr + } + mapping := IDMap{ + ContainerID: int(cid), + HostID: int(hid), + Size: int(size), + } + idmap = append(idmap, mapping) + } + } + return idmap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000000..3dd7bf21057 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := readSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = readSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := readSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := readSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
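	// with the ranges sorted, findNextRangeStart scans for the first free
+	// window of defaultRangeLen IDs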
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000000..d98b354cbd8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go new file mode 100644 index 00000000000..9703ecbd9d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
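+	// note: the plain space split means individual arguments must not
+	// themselves contain spaces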
+	return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go
new file mode 100644
index 00000000000..3d737b3e19d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+	buf      []byte
+	pos      int
+	lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+	n := copy(b.buf[b.pos:cap(b.buf)], p)
+	b.pos += n
+
+	if n < len(p) {
+		if b.pos == cap(b.buf) {
+			return n, errBufferFull
+		}
+		return n, io.ErrShortWrite
+	}
+	return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+	n := copy(p, b.buf[b.lastRead:b.pos])
+	b.lastRead += n
+	return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+	return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+	return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+	b.pos = 0
+	b.lastRead = 0
+	b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+	return string(b.buf[b.lastRead:b.pos])
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
new file mode 100644
index 00000000000..72a04f34919
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+	// ErrClosed is returned when Write is called on a closed BytesPipe.
+	ErrClosed = errors.New("write to closed BytesPipe")
+
+	bufPools     = make(map[int]*sync.Pool)
+	bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't be overgrown after peak loads.
+type BytesPipe struct {
+	mu       sync.Mutex
+	wait     *sync.Cond
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe, initialized with an empty internal
+// buffer whose capacity is minCap (64 bytes).
+func NewBytesPipe() *BytesPipe {
+	bp := &BytesPipe{}
+	bp.buf = append(bp.buf, getBuffer(minCap))
+	bp.wait = sync.NewCond(&bp.mu)
+	return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
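+// Write blocks once blockThreshold bytes are buffered, and resumes after a
+// reader has drained the pipe.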
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go new file mode 100644 index 00000000000..cd12470f9da --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -0,0 +1,195 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// AtomicFileWriterOptions specifies options for creating the atomic file writer. +type AtomicFileWriterOptions struct { + // NoSync specifies whether the sync call must be skipped for the file. + // If NoSync is not specified, the file is synced to the + // storage after it has been written and before it is moved to + // the specified path. 
+ NoSync bool +} + +var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{} + +// SetDefaultOptions overrides the default options used when creating an +// atomic file writer. +func SetDefaultOptions(opts AtomicFileWriterOptions) { + defaultWriterOptions = opts +} + +// NewAtomicFileWriterWithOpts returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + if opts == nil { + opts = &defaultWriterOptions + } + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + noSync: opts.NoSync, + }, nil +} + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + return NewAtomicFileWriterWithOpts(filename, perm, nil) +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode + noSync bool +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if !w.noSync { + if err := fdatasync(w.f); err != nil { + w.f.Close() + return err + } + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. 
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + if !defaultWriterOptions.NoSync { + return w.File.Close() + } + err := fdatasync(w.File) + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go new file mode 100644 index 00000000000..0da78a063d4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go @@ -0,0 +1,11 @@ +package ioutils + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func fdatasync(f *os.File) error { + return unix.Fdatasync(int(f.Fd())) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go new file mode 100644 index 00000000000..79a094035db --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package ioutils + +import ( + "os" +) + +func fdatasync(f *os.File) error { + return f.Sync() +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go new file mode 100644 index 00000000000..0e89787d4a8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -0,0 +1,171 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +type readWriteToCloserWrapper struct { + io.Reader + io.WriterTo + closer func() error +} + +func (r *readWriteToCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. 
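+// If r also implements io.WriterTo, the returned wrapper keeps that method
+// visible so io.Copy can take advantage of it.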
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+	if wt, ok := r.(io.WriterTo); ok {
+		return &readWriteToCloserWrapper{
+			Reader:   r,
+			WriterTo: wt,
+			closer:   closer,
+		}
+	}
+	return &readCloserWrapper{
+		Reader: r,
+		closer: closer,
+	}
+}
+
+type readerErrWrapper struct {
+	reader io.Reader
+	closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+	n, err := r.reader.Read(p)
+	if err != nil {
+		r.closer()
+	}
+	return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+	return &readerErrWrapper{
+		reader: r,
+		closer: closer,
+	}
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+	h := sha256.New()
+	if _, err := io.Copy(h, src); err != nil {
+		return "", err
+	}
+	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function; the function will run
+// at the end of file or when the file is closed.
+type OnEOFReader struct {
+	Rc io.ReadCloser
+	Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.Rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+	err := r.Rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *OnEOFReader) runFunc() {
+	if fn := r.Fn; fn != nil {
+		fn()
+		r.Fn = nil
+	}
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+	cancel func()
+	pR     *io.PipeReader // Stream to read from
+	pW     *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+	pR, pW := io.Pipe()
+
+	// Create a context used to signal when the pipe is closed
+	doneCtx, cancel := context.WithCancel(context.Background())
+
+	p := &cancelReadCloser{
+		cancel: cancel,
+		pR:     pR,
+		pW:     pW,
+	}
+
+	go func() {
+		_, err := io.Copy(pW, in)
+		select {
+		case <-ctx.Done():
+			// If the context was closed, p.closeWithError
+			// was already called. Calling it again would
+			// change the error that Read returns.
+		default:
+			p.closeWithError(err)
+		}
+		in.Close()
+	}()
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				p.closeWithError(ctx.Err())
+			case <-doneCtx.Done():
+				return
+			}
+		}
+	}()
+
+	return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
new file mode 100644
index 00000000000..1539ad21b57
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+	return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
new file mode 100644
index 00000000000..c719c120b5f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+	"io/ioutil"
+
+	"github.com/containers/storage/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
new file mode 100644
index 00000000000..52a4901adeb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+	"io"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
+
+type flusher interface {
+	Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
+	}
+
+	n, err = wf.w.Write(b)
+	wf.Flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	select {
+	case <-wf.closed:
+		return
+	default:
+	}
+
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
+	wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed at least once:
+// true if it has, false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
+	}
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
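+// If w implements Flush() (as an http.ResponseWriter that satisfies
+// http.Flusher does), that implementation is used; otherwise flushing is a
+// no-op. A minimal sketch for streaming responses:
+//
+//	wf := NewWriteFlusher(w) // w is, e.g., an http.ResponseWriter
+//	defer wf.Close()
+//	wf.Write([]byte("chunk")) // written and flushed to the client immediately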
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var fl flusher
+	if f, ok := w.(flusher); ok {
+		fl = f
+	} else {
+		fl = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
new file mode 100644
index 00000000000..ccc7f9c23e0
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+	return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+	return &writeCloserWrapper{
+		Writer: r,
+		closer: closer,
+	}
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		Writer: w,
+	}
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+	count, err = wc.Writer.Write(p)
+	wc.Count += int64(count)
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md
new file mode 100644
index 00000000000..ad15e89af10
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+=====
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however, the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/storage/pkg/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures that
+any other function dealing with the same name will block.
+
+When needing to modify the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the
+modification is being performed.
+
diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go
new file mode 100644
index 00000000000..0b22ddfab85
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+	mu    sync.Mutex
+	locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct {
+	mu sync.Mutex
+	// waiters is the number of waiters waiting to acquire the lock
+	// this is int32 instead of uint32 so we can add `-1` in `dec()`
+	waiters int32
+}
+
+// inc increments the number of waiters waiting for the lock
+func (l *lockCtr) inc() {
+	atomic.AddInt32(&l.waiters, 1)
+}
+
+// dec decrements the number of waiters waiting on the lock
+func (l *lockCtr) dec() {
+	atomic.AddInt32(&l.waiters, -1)
+}
+
+// count gets the current number of waiters
+func (l *lockCtr) count() int32 {
+	return atomic.LoadInt32(&l.waiters)
+}
+
+// Lock locks the mutex
+func (l *lockCtr) Lock() {
+	l.mu.Lock()
+}
+
+// Unlock unlocks the mutex
+func (l *lockCtr) Unlock() {
+	l.mu.Unlock()
+}
+
+// New creates a new Locker
+func New() *Locker {
+	return &Locker{
+		locks: make(map[string]*lockCtr),
+	}
+}
+
+// Lock locks a mutex with the given name.
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go new file mode 100644 index 00000000000..6a00141c3de --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -0,0 +1,107 @@ +package lockfile + +import ( + "path/filepath" + "sync" + "time" + + "github.com/pkg/errors" +) + +// A Locker represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +type Locker interface { + // Acquire a writer lock. + // The default unix implementation panics if: + // - opening the lockfile failed + // - tried to lock a read-only lock-file + Lock() + + // Acquire a writer lock recursively, allowing for recursive acquisitions + // within the same process space. + RecursiveLock() + + // Unlock the lock. + // The default unix implementation panics if: + // - unlocking an unlocked lock + // - if the lock counter is corrupted + Unlock() + + // Acquire a reader lock. + RLock() + + // Touch records, for others sharing the lock, that the caller was the + // last writer. It should only be called with the lock held. + Touch() error + + // Modified() checks if the most recent writer was a party other than the + // last recorded writer. It should only be called with the lock held. + Modified() (bool, error) + + // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. + TouchedSince(when time.Time) bool + + // IsReadWrite() checks if the lock file is read-write + IsReadWrite() bool + + // Locked() checks if lock is locked for writing by a thread in this process + Locked() bool +} + +var ( + lockfiles map[string]Locker + lockfilesLock sync.Mutex +) + +// GetLockfile opens a read-write lock file, creating it if necessary. The +// Locker object may already be locked if the path has already been requested +// by the current process. +func GetLockfile(path string) (Locker, error) { + return getLockfile(path, false) +} + +// GetROLockfile opens a read-only lock file, creating it if necessary. The +// Locker object may already be locked if the path has already been requested +// by the current process. 
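+//
+// A typical pattern for either helper (the path is illustrative only):
+//
+//	lk, err := GetLockfile("/var/lib/foo/foo.lock")
+//	if err != nil {
+//		return err
+//	}
+//	lk.Lock()
+//	defer lk.Unlock()
+//	if modified, err := lk.Modified(); err == nil && modified {
+//		// another party wrote last; reload any cached state here
+//	}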
+func GetROLockfile(path string) (Locker, error) { + return getLockfile(path, true) +} + +// getLockfile returns a Locker object, possibly (depending on the platform) +// working inter-process, and associated with the specified path. +// +// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, +// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// +// WARNING: +// - The lock may or MAY NOT be inter-process. +// - There may or MAY NOT be an actual object on the filesystem created for the specified path. +// - Even if ro, the lock MAY be exclusive. +func getLockfile(path string, ro bool) (Locker, error) { + lockfilesLock.Lock() + defer lockfilesLock.Unlock() + if lockfiles == nil { + lockfiles = make(map[string]Locker) + } + cleanPath, err := filepath.Abs(path) + if err != nil { + return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path) + } + if locker, ok := lockfiles[cleanPath]; ok { + if ro && locker.IsReadWrite() { + return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath) + } + if !ro && !locker.IsReadWrite() { + return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath) + } + return locker, nil + } + locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker + if err != nil { + return nil, err + } + lockfiles[cleanPath] = locker + return locker, nil +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go new file mode 100644 index 00000000000..fc080acbed7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -0,0 +1,262 @@ +// +build linux solaris darwin freebsd + +package lockfile + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type lockfile struct { + // rwMutex serializes concurrent reader-writer acquisitions in the same process space + rwMutex *sync.RWMutex + // stateMutex is used to synchronize concurrent accesses to the state below + stateMutex *sync.Mutex + counter int64 + file string + fd uintptr + lw string + locktype int16 + locked bool + ro bool + recursive bool +} + +// openLock opens the file at path and returns the corresponding file +// descriptor. Note that the path is opened read-only when ro is set. If ro +// is unset, openLock will open the path read-write and create the file if +// necessary. +func openLock(path string, ro bool) (fd int, err error) { + if ro { + fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC|os.O_CREATE, 0) + } else { + fd, err = unix.Open(path, + os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, + unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH, + ) + } + + if err == nil { + return + } + + // the directory of the lockfile seems to be removed, try to create it + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return fd, errors.Wrap(err, "creating locker directory") + } + + return openLock(path, ro) + } + + return +} + +// createLockerForPath returns a Locker object, possibly (depending on the platform) +// working inter-process and associated with the specified path. 
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockerForPath(path string, ro bool) (Locker, error) {
+	// Check if we can open the lock.
+	fd, err := openLock(path, ro)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error opening %q", path)
+	}
+	unix.Close(fd)
+
+	locktype := unix.F_WRLCK
+	if ro {
+		locktype = unix.F_RDLCK
+	}
+	return &lockfile{
+		stateMutex: &sync.Mutex{},
+		rwMutex:    &sync.RWMutex{},
+		file:       path,
+		lw:         stringid.GenerateRandomID(),
+		locktype:   int16(locktype),
+		locked:     false,
+		ro:         ro}, nil
+}
+
+// lock locks the lockfile via fcntl(2) based on the specified type and
+// command.
+func (l *lockfile) lock(lType int16, recursive bool) {
+	lk := unix.Flock_t{
+		Type:   lType,
+		Whence: int16(os.SEEK_SET),
+		Start:  0,
+		Len:    0,
+	}
+	switch lType {
+	case unix.F_RDLCK:
+		l.rwMutex.RLock()
+	case unix.F_WRLCK:
+		if recursive {
+			// NOTE: that's okay as recursive is only set in RecursiveLock(), so
+			// there's no need to protect against hypothetical RDLCK cases.
+			l.rwMutex.RLock()
+		} else {
+			l.rwMutex.Lock()
+		}
+	default:
+		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
+	}
+	l.stateMutex.Lock()
+	defer l.stateMutex.Unlock()
+	if l.counter == 0 {
+		// If we're the first reference on the lock, we need to open the file again.
+		fd, err := openLock(l.file, l.ro)
+		if err != nil {
+			panic(fmt.Sprintf("error opening %q: %v", l.file, err))
+		}
+		l.fd = uintptr(fd)
+
+		// Optimization: only use the (expensive) fcntl syscall when
+		// the counter is 0. In this case, we're either the first
+		// reader lock or a writer lock.
+		for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+			time.Sleep(10 * time.Millisecond)
+		}
+	}
+	l.locktype = lType
+	l.locked = true
+	l.recursive = recursive
+	l.counter++
+}
+
+// Lock locks the lockfile as a writer. Panics if the lock is a read-only one.
+func (l *lockfile) Lock() {
+	if l.ro {
+		panic("can't take write lock on read-only lock file")
+	} else {
+		l.lock(unix.F_WRLCK, false)
+	}
+}
+
+// RecursiveLock locks the lockfile as a writer but allows for recursive
+// acquisitions within the same process space. Note that RLock() is called
+// instead if the lock file is read-only.
+func (l *lockfile) RecursiveLock() {
+	if l.ro {
+		l.RLock()
+	} else {
+		l.lock(unix.F_WRLCK, true)
+	}
+}
+
+// RLock locks the lockfile as a reader.
+func (l *lockfile) RLock() {
+	l.lock(unix.F_RDLCK, false)
+}
+
+// Unlock unlocks the lockfile.
+func (l *lockfile) Unlock() {
+	l.stateMutex.Lock()
+	if !l.locked {
+		// Panic when unlocking an unlocked lock. That's a violation
+		// of the lock semantics and this panic makes it visible.
+		panic("calling Unlock on unlocked lock")
+	}
+	l.counter--
+	if l.counter < 0 {
+		// Panic when the counter is negative. There is no way we can
+		// recover from a corrupted lock and we need to protect the
+		// storage from corruption.
+ panic(fmt.Sprintf("lock %q has been unlocked too often", l.file)) + } + if l.counter == 0 { + // We should only release the lock when the counter is 0 to + // avoid releasing read-locks too early; a given process may + // acquire a read lock multiple times. + l.locked = false + // Close the file descriptor on the last unlock, releasing the + // file lock. + unix.Close(int(l.fd)) + } + if l.locktype == unix.F_RDLCK || l.recursive { + l.rwMutex.RUnlock() + } else { + l.rwMutex.Unlock() + } + l.stateMutex.Unlock() +} + +// Locked checks if lockfile is locked for writing by a thread in this process. +func (l *lockfile) Locked() bool { + l.stateMutex.Lock() + defer l.stateMutex.Unlock() + return l.locked && (l.locktype == unix.F_WRLCK) +} + +// Touch updates the lock file with the UID of the user. +func (l *lockfile) Touch() error { + l.stateMutex.Lock() + if !l.locked || (l.locktype != unix.F_WRLCK) { + panic("attempted to update last-writer in lockfile without the write lock") + } + defer l.stateMutex.Unlock() + l.lw = stringid.GenerateRandomID() + id := []byte(l.lw) + n, err := unix.Pwrite(int(l.fd), id, 0) + if err != nil { + return err + } + if n != len(id) { + return unix.ENOSPC + } + return nil +} + +// Modified indicates if the lockfile has been updated since the last time it +// was loaded. +func (l *lockfile) Modified() (bool, error) { + l.stateMutex.Lock() + id := []byte(l.lw) + if !l.locked { + panic("attempted to check last-writer in lockfile without locking it first") + } + defer l.stateMutex.Unlock() + n, err := unix.Pread(int(l.fd), id, 0) + if err != nil { + return true, err + } + if n != len(id) { + return true, nil + } + lw := l.lw + l.lw = string(id) + return l.lw != lw, nil +} + +// IsReadWriteLock indicates if the lock file is a read-write lock. +func (l *lockfile) IsReadWrite() bool { + return !l.ro +} + +// TouchedSince indicates if the lock file has been touched since the specified time +func (l *lockfile) TouchedSince(when time.Time) bool { + st, err := system.Fstat(int(l.fd)) + if err != nil { + return true + } + mtim := st.Mtim() + touched := time.Unix(mtim.Unix()) + return when.Before(touched) +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go new file mode 100644 index 00000000000..82bd91db9a9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -0,0 +1,75 @@ +// +build windows + +package lockfile + +import ( + "os" + "sync" + "time" +) + +// createLockerForPath returns a Locker object, possibly (depending on the platform) +// working inter-process and associated with the specified path. +// +// This function will be called at most once for each path value within a single process. +// +// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, +// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// +// WARNING: +// - The lock may or MAY NOT be inter-process. +// - There may or MAY NOT be an actual object on the filesystem created for the specified path. +// - Even if ro, the lock MAY be exclusive. 
+func createLockerForPath(path string, ro bool) (Locker, error) { + return &lockfile{locked: false}, nil +} + +type lockfile struct { + mu sync.Mutex + file string + locked bool +} + +func (l *lockfile) Lock() { + l.mu.Lock() + l.locked = true +} + +func (l *lockfile) RecursiveLock() { + // We don't support Windows but a recursive writer-lock in one process-space + // is really a writer lock, so just panic. + panic("not supported") +} + +func (l *lockfile) RLock() { + l.mu.Lock() + l.locked = true +} + +func (l *lockfile) Unlock() { + l.locked = false + l.mu.Unlock() +} + +func (l *lockfile) Locked() bool { + return l.locked +} + +func (l *lockfile) Modified() (bool, error) { + return false, nil +} +func (l *lockfile) Touch() error { + return nil +} +func (l *lockfile) IsReadWrite() bool { + return false +} + +func (l *lockfile) TouchedSince(when time.Time) bool { + stat, err := os.Stat(l.file) + if err != nil { + return true + } + return when.Before(stat.ModTime()) +} diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go new file mode 100644 index 00000000000..9b15bfff4c9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go new file mode 100644 index 00000000000..6f072650537 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -0,0 +1,157 @@ +// +build linux,cgo + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { + // Read information about the loopback file. 
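+	// Its device and inode numbers are compared further down against what
+	// the loopback driver reports, to verify that the kernel really attached
+	// this backing file.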
+ var st syscall.Stat_t + err = syscall.Fstat(int(sparseFile.Fd()), &st) + if err != nil { + logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + + // Check if the loopback driver and underlying filesystem agree on the loopback file's + // device and inode numbers. + dev, ino, err := getLoopbackBackingFile(loopFile) + if err != nil { + logrus.Errorf("Getting loopback backing file: %s", err) + return nil, ErrGetLoopbackBackingFile + } + if dev != uint64(st.Dev) || ino != st.Ino { + logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) + } + + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
+	startIndex, err := getNextFreeLoopbackIndex()
+	if err != nil {
+		logrus.Debugf("Error retrieving the next available loopback: %s", err)
+	}
+
+	// OpenFile adds O_CLOEXEC
+	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+	if err != nil {
+		logrus.Errorf("Opening sparse file: %v", err)
+		return nil, ErrAttachLoopbackDevice
+	}
+	defer sparseFile.Close()
+
+	loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the status of the loopback device
+	loopInfo := &loopInfo64{
+		loFileName: stringToLoopName(loopFile.Name()),
+		loOffset:   0,
+		loFlags:    LoFlagsAutoClear,
+	}
+
+	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+		logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+		// If the call failed, then free the loopback device
+		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+			logrus.Error("While cleaning up the loopback device")
+		}
+		loopFile.Close()
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
new file mode 100644
index 00000000000..ea6841958dd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -0,0 +1,53 @@
+// +build linux,cgo
+
+package loopback
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+	index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+	if err != 0 {
+		return 0, err
+	}
+	return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
+	loopInfo := &loopInfo64{}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return nil, err
+	}
+	return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
new file mode 100644
index 00000000000..a50de7f07a2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux,cgo
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go new file mode 100644 index 00000000000..c9be05776d1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux,cgo + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Get loopback backing file: %v", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. 
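+// It scans /dev/loop0, /dev/loop1, ... in order and returns the first device
+// whose backing device/inode pair matches; the caller owns the returned file
+// and must close it. A minimal sketch (the image path is illustrative only):
+//
+//	backing, err := os.Open("/var/lib/foo.img")
+//	if err != nil {
+//		return err
+//	}
+//	defer backing.Close()
+//	if loopFile := FindLoopDeviceFor(backing); loopFile != nil {
+//		defer loopFile.Close()
+//	}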
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == uint64(targetDevice) && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go b/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go new file mode 100644 index 00000000000..460b3070992 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go @@ -0,0 +1 @@ +package loopback diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 00000000000..07a0f4847c1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. 
+ // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// ParseOptions parses fstab type mount options into mount() flags +// and device specific data +func ParseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := ParseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 00000000000..0425d0dd633 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. 
+ REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000000..9afd26d4c06 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux + +package mount + +// These flags are unsupported. 
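+// They are defined as zero so that code referencing them still compiles on
+// these platforms; since ParseOptions skips entries whose flag value is zero,
+// every option is then passed through as filesystem-specific data.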
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 00000000000..23c5c44ac0d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,110 @@ +package mount + +import ( + "sort" + "strconv" + "strings" +) + +// mountError holds an error from a mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +// Error returns a string representation of mountError +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + +// Unwrap returns the underlying cause of the error +func (e *mountError) Unwrap() error { + return e.err +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, data := ParseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return mount(device, target, mType, uintptr(flag), data) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := ParseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + return unmount(target, mntDetach) +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepest mount first. +func RecursiveUnmount(target string) error { + mounts, err := GetMounts() + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + for i, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, target) { + continue + } + if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { + return err + // Ignore errors for submounts and continue trying to unmount others + // The final unmount should fail if there are any submounts remaining + } + } + return nil +} + +// ForceUnmount lazily unmounts a filesystem on supported platforms, +// otherwise does a normal unmount. 
+//
+// Deprecated: please use Unmount instead, it is identical.
+func ForceUnmount(target string) error {
+	return unmount(target, mntDetach)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
new file mode 100644
index 00000000000..b31cf99d0ff
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,54 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("Failed to call nmount: %s", reason)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
new file mode 100644
index 00000000000..594cd0881ac
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// ptypes is the set of propagation types.
+	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change propagation call.
+	pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+	// broflags is the combination of bind and read only
+	broflags = unix.MS_BIND | unix.MS_RDONLY
+
+	none = "none"
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+	switch {
+	// We treat device "" and "none" as a remount request to provide compatibility with
+	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+	case flags&unix.MS_REMOUNT != 0, device == "", device == none:
+		return true
+	default:
+		return false
+	}
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+	oflags := flags &^ ptypes
+	if !isremount(device, flags) || data != "" {
+		// Initial call applying all non-propagation flags for mount
+		// or remount with changed data
+		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+			return &mountError{
+				op:     "mount",
+				source: device,
+				target: target,
+				flags:  oflags,
+				data:   data,
+				err:    err,
+			}
+		}
+	}
+
+	if flags&ptypes != 0 {
+		// Change the propagation type.
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + } + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000000..9d20cfbf869 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!freebsd + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go new file mode 100644 index 00000000000..bb2da474f41 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -0,0 +1,13 @@ +package mount + +import ( + "github.com/moby/sys/mountinfo" +) + +type Info = mountinfo.Info + +var Mounted = mountinfo.Mounted + +func GetMounts() ([]*Info, error) { + return mountinfo.GetMounts(nil) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000000..cbc0299fb5b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -0,0 +1,5 @@ +package mount + +import "github.com/moby/sys/mountinfo" + +var PidMountInfo = mountinfo.PidMountInfo diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 00000000000..80922ad5ca3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,64 @@ +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, SHARED) +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, RSHARED) +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, PRIVATE) +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, RPRIVATE) +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, SLAVE) +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. 
+// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, RSLAVE) +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, UNBINDABLE) +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, RUNBINDABLE) +} + +func ensureMountedAs(mnt string, flags int) error { + mounted, err := Mounted(mnt) + if err != nil { + return err + } + + if !mounted { + if err := mount(mnt, mnt, "none", uintptr(BIND), ""); err != nil { + return err + } + } + + return mount("", mnt, "none", uintptr(flags), "") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go new file mode 100644 index 00000000000..1d1afeee2eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package mount + +import "golang.org/x/sys/unix" + +func unmount(target string, flags int) error { + err := unix.Unmount(target, flags) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. + return nil + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(flags), + err: err, + } +} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go new file mode 100644 index 00000000000..eebc4ab84e2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go @@ -0,0 +1,7 @@ +// +build windows + +package mount + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000000..7738fc7411e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. 
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
+func CompareKernelVersion(a, b VersionInfo) int {
+	if a.Kernel < b.Kernel {
+		return -1
+	} else if a.Kernel > b.Kernel {
+		return 1
+	}
+
+	if a.Major < b.Major {
+		return -1
+	} else if a.Major > b.Major {
+		return 1
+	}
+
+	if a.Minor < b.Minor {
+		return -1
+	} else if a.Minor > b.Minor {
+		return 1
+	}
+
+	return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) {
+	var (
+		kernel, major, minor, parsed int
+		flavor, partial              string
+	)
+
+	// Ignore error from Sscanf to allow an empty flavor. Instead, just
+	// make sure we got all the version numbers.
+	parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
+	if parsed < 2 {
+		return nil, errors.New("Can't parse kernel version " + release)
+	}
+
+	// sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
+	parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
+	if parsed < 1 {
+		flavor = partial
+	}
+
+	return &VersionInfo{
+		Kernel: kernel,
+		Major:  major,
+		Minor:  minor,
+		Flavor: flavor,
+	}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
new file mode 100644
index 00000000000..71f205b2852
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
@@ -0,0 +1,56 @@
+// +build darwin
+
+// Package kernel provides helper function to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"github.com/mattn/go-shellwords"
+)
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+	release, err := getRelease()
+	if err != nil {
+		return nil, err
+	}
+
+	return ParseRelease(release)
+}
+
+// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version
+func getRelease() (string, error) {
+	cmd := exec.Command("system_profiler", "SPSoftwareDataType")
+	osName, err := cmd.Output()
+	if err != nil {
+		return "", err
+	}
+
+	var release string
+	data := strings.Split(string(osName), "\n")
+	for _, line := range data {
+		if strings.Contains(line, "Kernel Version") {
+			// It has the format like ' Kernel Version: Darwin 14.5.0'
+			content := strings.SplitN(line, ":", 2)
+			if len(content) != 2 {
+				return "", fmt.Errorf("Kernel Version is invalid")
+			}
+
+			prettyNames, err := shellwords.Parse(content[1])
+			if err != nil {
+				return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
+			}
+
+			if len(prettyNames) != 2 {
+				return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ")
+			}
+			release = prettyNames[1]
+		}
+	}
+
+	return release, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
new file mode 100644
index 00000000000..7a68bc39bf4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
@@ -0,0 +1,45 @@
+// +build linux freebsd solaris openbsd
+
+// Package kernel provides helper function to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+	"bytes"
+
+	"github.com/sirupsen/logrus"
+)
+
+// GetKernelVersion gets the current kernel version.
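+// A minimal sketch of the comparison helper built on top of it (the version
+// numbers are illustrative only):
+//
+//	if kernel.CheckKernelVersion(4, 8, 0) {
+//		// running kernel is at least 4.8
+//	}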
+func GetKernelVersion() (*VersionInfo, error) {
+	uts, err := uname()
+	if err != nil {
+		return nil, err
+	}
+
+	release := make([]byte, len(uts.Release))
+
+	i := 0
+	for _, c := range uts.Release {
+		release[i] = byte(c)
+		i++
+	}
+
+	// Remove the \x00 from the release for Atoi to parse correctly
+	release = release[:bytes.IndexByte(release, 0)]
+
+	return ParseRelease(string(release))
+}
+
+// CheckKernelVersion checks if current kernel is newer than (or equal to)
+// the given version.
+func CheckKernelVersion(k, major, minor int) bool {
+	if v, err := GetKernelVersion(); err != nil {
+		logrus.Warnf("Error getting kernel version: %s", err)
+	} else {
+		if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 00000000000..3d382923686
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package kernel
+
+import (
+	"fmt"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+	kvi   string // Full kernel version string (here, the BuildLabEx registry value)
+	major int    // Major part of the kernel version (e.g. 6.1.7601.17592 -> 6)
+	minor int    // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 1)
+	build int    // Build number of the kernel version (e.g. 6.1.7601.17592 -> 7601)
+}
+
+func (k *VersionInfo) String() string {
+	return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+
+	var (
+		h         windows.Handle
+		dwVersion uint32
+		err       error
+	)
+
+	KVI := &VersionInfo{"Unknown", 0, 0, 0}
+
+	if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
+		windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+		0,
+		windows.KEY_READ,
+		&h); err != nil {
+		return KVI, err
+	}
+	defer windows.RegCloseKey(h)
+
+	var buf [1 << 10]uint16
+	var typ uint32
+	n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+	if err = windows.RegQueryValueEx(h,
+		windows.StringToUTF16Ptr("BuildLabEx"),
+		nil,
+		&typ,
+		(*byte)(unsafe.Pointer(&buf[0])),
+		&n); err != nil {
+		return KVI, err
+	}
+
+	KVI.kvi = windows.UTF16ToString(buf[:])
+
+	// Important - docker.exe MUST be manifested for this API to return
+	// the correct information.
+	if dwVersion, err = windows.GetVersion(); err != nil {
+		return KVI, err
+	}
+
+	KVI.major = int(dwVersion & 0xFF)
+	KVI.minor = int((dwVersion & 0xFF00) >> 8)
+	KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
+
+	return KVI, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
new file mode 100644
index 00000000000..e913fad0013
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
@@ -0,0 +1,17 @@
+package kernel
+
+import "golang.org/x/sys/unix"
+
+// Utsname represents the system name structure.
+// It is a passthrough for unix.Utsname, defined as its own type so that it
+// stays portable to platforms where unix.Utsname is not available.
+type Utsname unix.Utsname
+
+func uname() (*unix.Utsname, error) {
+	uts := &unix.Utsname{}
+
+	if err := unix.Uname(uts); err != nil {
+		return nil, err
+	}
+	return uts, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
new file mode 100644
index 00000000000..49370bd3dd9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
@@ -0,0 +1,14 @@
+package kernel
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func uname() (*unix.Utsname, error) {
+	uts := &unix.Utsname{}
+
+	if err := unix.Uname(uts); err != nil {
+		return nil, err
+	}
+	return uts, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
new file mode 100644
index 00000000000..1da3f239fac
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!solaris
+
+package kernel
+
+import (
+	"errors"
+)
+
+// Utsname represents the system name structure.
+// It is defined here to make it portable as it is available on linux but not
+// on windows.
+type Utsname struct {
+	Release [65]byte
+}
+
+func uname() (*Utsname, error) {
+	return nil, errors.New("Kernel version detection is available only on linux")
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
new file mode 100644
index 00000000000..acc897168f3
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -0,0 +1,69 @@
+// Package parsers provides helper functions to parse and validate different
+// types of strings, such as hosts, unix addresses, tcp addresses, filters,
+// and kernel operating system versions.
+package parsers
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
+func ParseKeyValueOpt(opt string) (string, string, error) {
+	parts := strings.SplitN(opt, "=", 2)
+	if len(parts) != 2 {
+		return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+	}
+	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
+
+// ParseUintList parses and validates the specified string as the value
+// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be
+// one of the formats below. Note that duplicates are actually allowed in the
+// input string. It returns a `map[int]bool` with available elements from `val`
+// set to `true`.
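+// For example, under these rules "0,3-4,7" would be expected to yield a map
+// with 0, 3, 4 and 7 set to true (an illustrative reading of the rules, not
+// taken from this package's tests).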
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 00000000000..a15e3688b9e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. 
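+//
+// A hedged usage sketch (illustrative only, not part of the vendored
+// package): wrap a pooled reader around an io.Reader so that Close() hands
+// the buffer back. Note that buf is passed as both arguments here, so reads
+// are buffered and Close() only recycles the buffer (a plain *bufio.Reader
+// is not an io.ReadCloser, so nothing else gets closed).
+//
+//	buf := pools.BufioReader32KPool.Get(src)
+//	rc := pools.BufioReader32KPool.NewReadCloserWrapper(buf, buf)
+//	defer rc.Close()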
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+	return ioutils.NewReadCloserWrapper(r, func() error {
+		if readCloser, ok := r.(io.ReadCloser); ok {
+			readCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+	pool *sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	pool := &sync.Pool{
+		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+	}
+	return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go
new file mode 100644
index 00000000000..dd52b9082f7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a call to a function in a goroutine,
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md
new file mode 100644
index 00000000000..6658f69b69d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+# reexec
+
+The `reexec` package facilitates the busybox-style reexec of the docker binary that we require because
+of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of
+the exec of the binary will be used to find and execute custom init paths.
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
new file mode 100644
index 00000000000..d3dd86d349f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -0,0 +1,34 @@
+// +build linux
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
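+//
+// A hedged sketch of the registration side of this pattern (the handler name
+// "my-task" is invented for illustration):
+//
+//	func init() { reexec.Register("my-task", func() { /* child-side work */ }) }
+//
+//	// in main(), before anything else:
+//	if reexec.Init() {
+//		return // we were re-executed as "my-task"
+//	}
+//	cmd := reexec.Command("my-task", "arg1") // argv[0] selects the handler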
+func Command(args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
new file mode 100644
index 00000000000..9dd8cb9bbee
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -0,0 +1,32 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
new file mode 100644
index 00000000000..5b3605f319c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -0,0 +1,20 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	return nil
+}
+
+// CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
new file mode 100644
index 00000000000..d868564767f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -0,0 +1,34 @@
+// +build windows
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+	panicIfNotInitialized()
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go new file mode 100644 index 00000000000..a1938cd4f34 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -0,0 +1,66 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true + if exists { + initializer() + + return true + } + return false +} + +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this. In + // order for that to work, main() should start with this + // boilerplate, or an equivalent: + // if reexec.Init() { + // return + // } + panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") + } +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/containers/storage/pkg/stringid/README.md b/vendor/github.com/containers/storage/pkg/stringid/README.md new file mode 100644 index 00000000000..37a5098fd98 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go new file mode 100644 index 00000000000..a0c7c42a050 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -0,0 +1,99 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "regexp" + "strconv" + "strings" + "time" +) + +const shortLen = 12 + +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. 
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length Id.
+func TruncateID(id string) string {
+	if i := strings.IndexRune(id, ':'); i >= 0 {
+		id = id[i+1:]
+	}
+	if len(id) > shortLen {
+		id = id[:shortLen]
+	}
+	return id
+}
+
+func generateID(r io.Reader) string {
+	b := make([]byte, 32)
+	for {
+		if _, err := io.ReadFull(r, b); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		id := hex.EncodeToString(b)
+		// if we try to parse the truncated form as an int and we don't have
+		// an error then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+			continue
+		}
+		return id
+	}
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+	return generateID(cryptorand.Reader)
+}
+
+// GenerateNonCryptoID generates a unique id without using cryptographically
+// secure sources of randomness.
+// It helps you to save entropy.
+func GenerateNonCryptoID() string {
+	return generateID(readerFunc(rand.Read))
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return fmt.Errorf("image ID %q is invalid", id)
+	}
+	return nil
+}
+
+func init() {
+	// safely set the seed globally so we generate random ids. Tries to use a
+	// crypto seed before falling back to time.
+	var seed int64
+	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+		// This should not happen, but worst-case fallback to time-based seed.
+		seed = time.Now().UnixNano()
+	} else {
+		seed = cryptoseed.Int64()
+	}
+
+	rand.Seed(seed)
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (fn readerFunc) Read(p []byte) (int, error) {
+	return fn(p)
+}
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md
new file mode 100644
index 00000000000..b3e454573c3
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
new file mode 100644
index 00000000000..66a59c85d56
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
@@ -0,0 +1,110 @@
+// Package stringutils provides helper functions for dealing with strings.
+package stringutils
+
+import (
+	"bytes"
+	"math/rand"
+	"strings"
+)
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+	// make a really long string
+	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+	chars := "abcdefghijklmnopqrstuvwxyz" +
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+	res := make([]byte, n)
+	for i := 0; i < n; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
+
+// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
+// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.EqualFold(s, ss) { + return true + } + } + return false +} + +// RemoveFromSlice removes a string from a slice. The string can be present +// multiple times. The entire slice is iterated. +func RemoveFromSlice(slice []string, s string) (ret []string) { + for _, ss := range slice { + if !strings.EqualFold(s, ss) { + ret = append(ret, ss) + } + } + return ret +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/containers/storage/pkg/system/chmod.go b/vendor/github.com/containers/storage/pkg/system/chmod.go new file mode 100644 index 00000000000..a01d8abfbd5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chmod.go @@ -0,0 +1,17 @@ +package system + +import ( + "errors" + "os" + "syscall" +) + +func Chmod(name string, mode os.FileMode) error { + err := os.Chmod(name, mode) + + for err != nil && errors.Is(err, syscall.EINTR) { + err = os.Chmod(name, mode) + } + + return err +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 00000000000..056d19954d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. 
+	if err := setCTime(name, mtime); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
new file mode 100644
index 00000000000..09d58bcbfdd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+	"time"
+)
+
+//setCTime will set the create time on a file. On Unix, the create
+//time is updated as a side effect of setting the modified time, so
+//no action is required.
+func setCTime(path string, ctime time.Time) error {
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
new file mode 100644
index 00000000000..45428c141ca
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package system
+
+import (
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+//setCTime will set the create time on a file. On Windows, this requires
+//calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+	ctimespec := windows.NsecToTimespec(ctime.UnixNano())
+	pathp, e := windows.UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := windows.CreateFile(pathp,
+		windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+		windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer windows.Close(h)
+	c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
+	return windows.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go
new file mode 100644
index 00000000000..288318985e3
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+	"errors"
+)
+
+var (
+	// ErrNotSupportedPlatform means the platform is not supported.
+	ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go
new file mode 100644
index 00000000000..60f0514b1dd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"fmt"
+	"os/exec"
+	"syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError, returns 0 and an error otherwise.
+func GetExitCode(err error) (int, error) {
+	exitCode := 0
+	if exiterr, ok := err.(*exec.ExitError); ok {
+		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+			return procExit.ExitStatus(), nil
+		}
+	}
+	return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode processes the specified error and returns the exit status code
+// if the error was of type exec.ExitError, returns 0 otherwise.
+func ProcessExitCode(err error) (exitCode int) {
+	if err != nil {
+		var exiterr error
+		if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+			// TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go new file mode 100644 index 00000000000..17935088ded --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go new file mode 100644 index 00000000000..019c66441ce --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/containers/storage/pkg/system/lchown.go b/vendor/github.com/containers/storage/pkg/system/lchown.go new file mode 100644 index 00000000000..eb2d8b464c4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lchown.go @@ -0,0 +1,20 @@ +package system + +import ( + "os" + "syscall" +) + +func Lchown(name string, uid, gid int) error { + err := syscall.Lchown(name, uid, gid) + + for err == syscall.EINTR { + err = syscall.Lchown(name, uid, gid) + } + + if err != nil { + return &os.PathError{Op: "lchown", Path: name, Err: err} + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go new file mode 100644 index 00000000000..cff33bb4085 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go new file mode 100644 index 00000000000..e54d01e696b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go new file mode 100644 index 00000000000..e9d301f090c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package system + +import ( + "os" + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Lstat(path, s); err != nil {
+		return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go
new file mode 100644
index 00000000000..e51df0dafeb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+func Lstat(path string) (*StatT, error) {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return fromStatT(&fi)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go
new file mode 100644
index 00000000000..3b6e947e675
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+	// kernel binary code).
+	MemTotal int64
+
+	// Amount of free memory.
+	MemFree int64
+
+	// Total amount of swap space available.
+	SwapTotal int64
+
+	// Amount of swap space that is currently unused.
+	SwapFree int64
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
new file mode 100644
index 00000000000..385f1d5e735
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+	"bufio"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	file, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Throws error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+	meminfo := &MemInfo{}
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		// Expected format: ["MemTotal:", "1234", "kB"]
+		parts := strings.Fields(scanner.Text())
+
+		// Sanity checks: Skip malformed entries.
+		if len(parts) < 3 || parts[2] != "kB" {
+			continue
+		}
+
+		// Convert to bytes.
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			continue
+		}
+		bytes := int64(size) * units.KiB
+
+		switch parts[0] {
+		case "MemTotal:":
+			meminfo.MemTotal = bytes
+		case "MemFree:":
+			meminfo.MemFree = bytes
+		case "SwapTotal:":
+			meminfo.SwapTotal = bytes
+		case "SwapFree:":
+			meminfo.SwapFree = bytes
+		}
+
+	}
+
+	// Handle errors that may have occurred during the reading of the file.
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
new file mode 100644
index 00000000000..925776e789b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+//	struct swaptable *st;
+//	struct swapent *swapent;
+//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+//	swapent = st->swt_ent;
+//	for (int i = 0; i < num; i++,swapent++) {
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+//	}
+//	st->swt_n = num;
+//	return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+//	struct swapent *swapent = st->swt_ent;
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
+//		free(swapent->ste_path);
+//	}
+//	free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+//	return ent[i];
+// }
+// int64_t getPpKernel() {
+//	int64_t pp_kernel = 0;
+//	kstat_ctl_t *ksc;
+//	kstat_t *ks;
+//	kstat_named_t *knp;
+//	kid_t kid;
+//
+//	if ((ksc = kstat_open()) == NULL) {
+//		return -1;
+//	}
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+//		return -1;
+//	}
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+//		return -1;
+//	}
+//	switch (knp->data_type) {
+//	case KSTAT_DATA_UINT64:
+//		pp_kernel = knp->value.ui64;
+//		break;
+//	case KSTAT_DATA_UINT32:
+//		pp_kernel = knp->value.ui32;
+//		break;
+//	}
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
+//	return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// Get the system memory info using sysconf, same as prtconf
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+	ppKernel := C.getPpKernel()
+	MemTotal := getTotalMem()
+	MemFree := getFreeMem()
+	SwapTotal, SwapFree, err := getSysSwap()
+
+	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+		SwapFree < 0 {
+		return nil, fmt.Errorf("error getting system memory info %v\n", err)
+	}
+
+	meminfo := &MemInfo{}
+	// Total memory is total physical memory less the memory locked by the kernel
+	meminfo.MemTotal = MemTotal - int64(ppKernel)
+	meminfo.MemFree = MemFree
+	meminfo.SwapTotal = SwapTotal
+	meminfo.SwapFree = SwapFree
+
+	return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+	var tSwap int64
+	var fSwap int64
+	var diskblksPerPage int64
+	num, err := C.swapctl(C.SC_GETNSWP, nil)
+	if err != nil {
+		return -1, -1, err
+	}
+	st := C.allocSwaptable(num)
+	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+	if err != nil {
+		C.freeSwaptable(st)
+		return -1, -1, err
+	}
+
+	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+	for i := 0; i < int(num); i++ {
+		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+		tSwap += int64(swapent.ste_pages) * diskblksPerPage
+		fSwap += int64(swapent.ste_free) * diskblksPerPage
+	}
+	C.freeSwaptable(st)
+	return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
new file mode 100644
index 00000000000..3ce019dffdd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+	return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
new file mode 100644
index 00000000000..883944a4c53
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+	dwLength                uint32
+	dwMemoryLoad            uint32
+	ullTotalPhys            uint64
+	ullAvailPhys            uint64
+	ullTotalPageFile        uint64
+	ullAvailPageFile        uint64
+	ullTotalVirtual         uint64
+	ullAvailVirtual         uint64
+	ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
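+//
+// A hedged usage sketch, common to all platform variants of ReadMemInfo
+// (illustrative only, not part of the vendored package):
+//
+//	if mi, err := system.ReadMemInfo(); err == nil {
+//		fmt.Printf("total=%d free=%d bytes\n", mi.MemTotal, mi.MemFree)
+//	}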
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 00000000000..c276ce8e80d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows,!freebsd + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go new file mode 100644 index 00000000000..d09005589af --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go @@ -0,0 +1,22 @@ +// +build freebsd + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev uint64) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 00000000000..2e863c0215b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
+func Mkdev(major int64, minor int64) uint32 {
+	panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go
new file mode 100644
index 00000000000..f634a6be673
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/path.go
@@ -0,0 +1,21 @@
+package system
+
+import "runtime"
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv is unix style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+func DefaultPathEnv(platform string) string {
+	if runtime.GOOS == "windows" {
+		if platform != runtime.GOOS && LCOWSupported() {
+			return defaultUnixPathEnv
+		}
+		// Deliberately empty on Windows containers on Windows as the default path will be set by
+		// the container. Docker has no context of what the default path should be.
+		return ""
+	}
+	return defaultUnixPathEnv
+
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go
new file mode 100644
index 00000000000..f3762e69d36
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package system
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
new file mode 100644
index 00000000000..aab891522db
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package system
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:		--> Fail
+// C:\		--> \
+// a		--> a
+// /a		--> \a
+// d:\		--> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go
new file mode 100644
index 00000000000..a9a0dd7517e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go
@@ -0,0 +1,24 @@
+// +build linux freebsd solaris darwin
+
+package system
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsProcessAlive returns true if process with a given pid is running.
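+//
+// The implementation relies on the classic kill(pid, 0) idiom: signal 0
+// performs the existence and permission checks without delivering a signal,
+// so an EPERM reply still proves the process exists. A hedged usage sketch
+// (illustrative only):
+//
+//	if system.IsProcessAlive(pid) {
+//		system.KillProcess(pid) // force-stop via SIGKILL
+//	}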
+func IsProcessAlive(pid int) bool {
+	err := unix.Kill(pid, syscall.Signal(0))
+	if err == nil || err == unix.EPERM {
+		return true
+	}
+
+	return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+	_ = unix.Kill(pid, unix.SIGKILL)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
new file mode 100644
index 00000000000..510e714283d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -0,0 +1,79 @@
+package system
+
+import (
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/mount"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's ok for the dir to
+// be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances
+func EnsureRemoveAll(dir string) error {
+	notExistErr := make(map[string]bool)
+
+	// track retries
+	exitOnErr := make(map[string]int)
+	maxRetry := 100
+
+	// Attempt to unmount anything beneath this dir first
+	if err := mount.RecursiveUnmount(dir); err != nil {
+		logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
+	}
+
+	for {
+		err := os.RemoveAll(dir)
+		if err == nil {
+			return nil
+		}
+
+		pe, ok := err.(*os.PathError)
+		if !ok {
+			return err
+		}
+
+		if os.IsNotExist(err) {
+			if notExistErr[pe.Path] {
+				return err
+			}
+			notExistErr[pe.Path] = true
+
+			// There is a race where some subdir can be removed but after the parent
+			// dir entries have been read.
+			// So the path could be from `os.Remove(subdir)`
+			// If the reported non-existent path is not the passed in `dir` we
+			// should just retry, but otherwise return with no error.
+ if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if e := mount.Unmount(pe.Path); e != nil { + return errors.Wrapf(e, "error while removing %s", dir) + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go new file mode 100644 index 00000000000..715f05b9387 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go new file mode 100644 index 00000000000..715f05b9387 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go new file mode 100644 index 00000000000..af7af20fa4d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go new file mode 100644 index 00000000000..b607dea946f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go new file mode 100644 index 00000000000..b607dea946f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go new file mode 100644 index 00000000000..2fac918bfc3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -0,0 +1,74 @@ +// +build !windows + +package system + +import ( + "os" + "strconv" + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} + +// Fstat takes an open file descriptor and returns +// a system.StatT type pertaining to that file. 
+//
+// Throws an error if the file descriptor is invalid
+func Fstat(fd int) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Fstat(fd, s); err != nil {
+		return nil, &os.PathError{Op: "Fstat", Path: strconv.Itoa(fd), Err: err}
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
new file mode 100644
index 00000000000..d306360520b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+	"os"
+	"time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, size, etc about a file.
+type StatT struct {
+	mode os.FileMode
+	size int64
+	mtim time.Time
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+	return os.FileMode(s.mode)
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() time.Time {
+	return time.Time(s.mtim)
+}
+
+// UID returns file's user id of owner.
+//
+// on windows this is always 0 because there is no concept of UID
+func (s StatT) UID() uint32 {
+	return 0
+}
+
+// GID returns file's group id of owner.
+//
+// on windows this is always 0 because there is no concept of GID
+func (s StatT) GID() uint32 {
+	return 0
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		return nil, err
+	}
+	return fromStatT(&fi)
+}
+
+// fromStatT converts an os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+	return &StatT{
+		size: (*fi).Size(),
+		mode: (*fi).Mode(),
+		mtim: (*fi).ModTime()}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
new file mode 100644
index 00000000000..1bb852d11fa
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -0,0 +1,25 @@
+// +build linux freebsd darwin
+
+package system
+
+import (
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+	return unix.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+	return []string{commandLine}, nil
+}
+
+// IsEBUSY checks if the specified error is EBUSY.
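+//
+// A hedged usage sketch (illustrative only): back off and retry an unmount
+// while the mount point is still busy.
+//
+//	if err := system.Unmount(dest); err != nil && system.IsEBUSY(err) {
+//		time.Sleep(100 * time.Millisecond)
+//		err = system.Unmount(dest)
+//	}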
+func IsEBUSY(err error) bool { + return errors.Is(err, unix.EBUSY) +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go new file mode 100644 index 00000000000..f4d8692cdb8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -0,0 +1,127 @@ +package system + +import ( + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. 
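+//
+// For example (illustrative), `app.exe -v "C:\Program Files"` parses to
+// []string{"app.exe", "-v", "C:\\Program Files"}.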
+func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} + +// IsEBUSY checks if the specified error is EBUSY. +func IsEBUSY(err error) bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go new file mode 100644 index 00000000000..5a10eda5afb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go new file mode 100644 index 00000000000..13f1de1769c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000000..6a77524376d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go new file mode 100644 index 00000000000..edc588a63f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go @@ -0,0 +1,25 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. 
+// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000000..139714544d0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go new file mode 100644 index 00000000000..6b47c4e717f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -0,0 +1,87 @@ +package system + +import ( + "bytes" + "os" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENODATA: + return nil, nil + case errno != nil: + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + if err := unix.Lsetxattr(path, attr, data, flags); err != nil { + return &os.PathError{Op: "lsetxattr", Path: path, Err: err} + } + + return nil +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. 
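+//
+// The returned names can be handed back to Lgetxattr; e.g. (illustrative):
+//
+//	names, _ := Llistxattr(path)
+//	for _, name := range names {
+//		value, _ := Lgetxattr(path, name)
+//		_ = value
+//	}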
+func Llistxattr(path string) ([]string, error) { + dest := make([]byte, 128) + sz, errno := unix.Llistxattr(path, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Llistxattr(path, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + dest = make([]byte, sz) + sz, errno = unix.Llistxattr(path, dest) + } + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + var attrs []string + for _, token := range bytes.Split(dest[:sz], []byte{0}) { + if len(token) > 0 { + attrs = append(attrs, string(token)) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000000..3fc27f0b139 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux + +package system + +import "syscall" + +const ( + // Value is larger than the maximum size allowed + E2BIG syscall.Errno = syscall.Errno(0) + + // Operation not supported + EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) +) + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} + +// Llistxattr is not supported on platforms other than linux. +func Llistxattr(path string) ([]string, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go new file mode 100644 index 00000000000..674e0a0baed --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go @@ -0,0 +1,66 @@ +package tarlog + +import ( + "io" + "sync" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" +) + +type tarLogger struct { + writer *io.PipeWriter + closeMutex *sync.Mutex + closed bool +} + +// NewLogger returns a writer that, when a tar archive is written to it, calls +// `logger` for each file header it encounters in the archive. +func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) { + reader, writer := io.Pipe() + t := &tarLogger{ + writer: writer, + closeMutex: new(sync.Mutex), + closed: false, + } + tr := tar.NewReader(reader) + t.closeMutex.Lock() + go func() { + hdr, err := tr.Next() + for err == nil { + logger(hdr) + hdr, err = tr.Next() + + } + // Make sure to avoid writes after the reader has been closed. + if err := reader.Close(); err != nil { + logrus.Errorf("Closing tarlogger reader: %v", err) + } + // Unblock the Close(). + t.closeMutex.Unlock() + }() + return t, nil +} + +func (t *tarLogger) Write(b []byte) (int, error) { + if t.closed { + // We cannot use os.Pipe() as this alters the tar's digest. Using + // io.Pipe() requires this workaround as it does not allow for writes + // after close. + return len(b), nil + } + n, err := t.writer.Write(b) + if err == io.ErrClosedPipe { + // The pipe got closed. Track it and avoid to call Write in future. 
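+		// Report the full length as accepted: the tar reader side of the pipe
+		// is gone, so the remaining bytes are deliberately discarded.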
+ t.closed = true + return len(b), nil + } + return n, err +} + +func (t *tarLogger) Close() error { + err := t.writer.Close() + // Wait for the reader to finish. + t.closeMutex.Lock() + return err +} diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go new file mode 100644 index 00000000000..74776e65e6f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -0,0 +1,139 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + return idx.addID(id) +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. 
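+//
+// For example (illustrative): with stored IDs "abc123..." and "abd456...",
+// Get("ab") fails with ErrAmbiguousPrefix, while Get("abc") resolves to the
+// full first ID.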
+func (idx *TruncIndex) Get(s string) (string, error) {
+	if s == "" {
+		return "", ErrEmptyPrefix
+	}
+	var (
+		id string
+	)
+	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+		if id != "" {
+			// two or more IDs match the prefix, so the lookup is ambiguous
+			id = ""
+			return ErrAmbiguousPrefix{prefix: string(prefix)}
+		}
+		id = string(prefix)
+		return nil
+	}
+
+	idx.RLock()
+	defer idx.RUnlock()
+	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+		return "", err
+	}
+	if id != "" {
+		return id, nil
+	}
+	return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler method does not call any public
+// method on truncindex as the internal locking is not reentrant/recursive
+// and will result in deadlock.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+	idx.Lock()
+	defer idx.Unlock()
+	idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+		handler(string(prefix))
+		return nil
+	})
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
new file mode 100644
index 00000000000..4f441c32c59
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
@@ -0,0 +1,22 @@
+// +build linux,cgo
+
+package unshare
+
+import (
+	"unsafe"
+)
+
+/*
+#cgo remoteclient CFLAGS: -Wall -Werror
+#include <stdlib.h>
+*/
+import "C"
+
+func getenv(name string) string {
+	cName := C.CString(name)
+	defer C.free(unsafe.Pointer(cName))
+
+	value := C.GoString(C.getenv(cName))
+
+	return value
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
new file mode 100644
index 00000000000..a5005403afa
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
@@ -0,0 +1,11 @@
+// +build linux,!cgo
+
+package unshare
+
+import (
+	"os"
+)
+
+func getenv(name string) string {
+	return os.Getenv(name)
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
new file mode 100644
index 00000000000..c0e359b2761
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
@@ -0,0 +1,378 @@
+#ifndef UNSHARE_NO_CODE_AT_ALL
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <libgen.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+/* Open Source projects like conda-forge want to package podman and are based
+   off of centos:6. conda-forge has minimal libc requirements and lacks the
+   memfd.h header, so we use sys/mman.h instead.
+*/
+#ifndef MFD_ALLOW_SEALING
+#define MFD_ALLOW_SEALING 2U
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 1U
+#endif
+
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+#endif
+#ifndef F_SEAL_SEAL
+#define F_SEAL_SEAL 0x0001LU
+#endif
+#ifndef F_SEAL_SHRINK
+#define F_SEAL_SHRINK 0x0002LU
+#endif
+#ifndef F_SEAL_GROW
+#define F_SEAL_GROW 0x0004LU
+#endif
+#ifndef F_SEAL_WRITE
+#define F_SEAL_WRITE 0x0008LU
+#endif
+
+#define BUFSTEP 1024
+
+static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
+static const char
*_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone"; + +static int _containers_unshare_parse_envint(const char *envname) { + char *p, *q; + long l; + + p = getenv(envname); + if (p == NULL) { + return -1; + } + q = NULL; + l = strtol(p, &q, 10); + if ((q == NULL) || (*q != '\0')) { + fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); + _exit(1); + } + unsetenv(envname); + return l; +} + +static void _check_proc_sys_file(const char *path) +{ + FILE *fp; + char buf[32]; + size_t n_read; + long r; + + fp = fopen(path, "r"); + if (fp == NULL) { + if (errno != ENOENT) + fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces); + } else { + memset(buf, 0, sizeof(buf)); + n_read = fread(buf, 1, sizeof(buf) - 1, fp); + if (n_read > 0) { + r = atoi(buf); + if (r == 0) { + fprintf(stderr, "User namespaces are not enabled in %s.\n", path); + } + } else { + fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path); + } + fclose(fp); + } +} + +static char **parse_proc_stringlist(const char *list) { + int fd, n, i, n_strings; + char *buf, *new_buf, **ret; + size_t size, new_size, used; + + fd = open(list, O_RDONLY); + if (fd == -1) { + return NULL; + } + buf = NULL; + size = 0; + used = 0; + for (;;) { + new_size = used + BUFSTEP; + new_buf = realloc(buf, new_size); + if (new_buf == NULL) { + free(buf); + fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP)); + return NULL; + } + buf = new_buf; + size = new_size; + memset(buf + used, '\0', size - used); + n = read(fd, buf + used, size - used - 1); + if (n < 0) { + fprintf(stderr, "read(): %m\n"); + return NULL; + } + if (n == 0) { + break; + } + used += n; + } + close(fd); + n_strings = 0; + for (n = 0; n < used; n++) { + if ((n == 0) || (buf[n-1] == '\0')) { + n_strings++; + } + } + ret = calloc(n_strings + 1, sizeof(char *)); + if (ret == NULL) { + fprintf(stderr, "calloc(): out of memory\n"); + return NULL; + } + i = 0; + for (n = 0; n < used; n++) { + if ((n == 0) || (buf[n-1] == '\0')) { + ret[i++] = &buf[n]; + } + } + ret[i] = NULL; + return ret; +} + +/* + * Taken from the runc cloned_binary.c file + * Copyright (C) 2019 Aleksa Sarai + * Copyright (C) 2019 SUSE LLC + * + * This work is dual licensed under the following licenses. You may use, + * redistribute, and/or modify the work under the conditions of either (or + * both) licenses. + * + * === Apache-2.0 === + */ +static int try_bindfd(void) +{ + int fd, ret = -1; + char src[PATH_MAX] = {0}; + char template[64] = {0}; + + strncpy(template, "/tmp/containers.XXXXXX", sizeof(template) - 1); + + /* + * We need somewhere to mount it, mounting anything over /proc/self is a + * BAD idea on the host -- even if we do it temporarily. + */ + fd = mkstemp(template); + if (fd < 0) + return ret; + close(fd); + + ret = -EPERM; + + if (readlink("/proc/self/exe", src, sizeof (src) - 1) < 0) + goto out; + + if (mount(src, template, NULL, MS_BIND, NULL) < 0) + goto out; + if (mount(NULL, template, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) + goto out_umount; + + /* Get read-only handle that we're sure can't be made read-write. */ + ret = open(template, O_PATH | O_CLOEXEC); + +out_umount: + /* + * Make sure the MNT_DETACH works, otherwise we could get remounted + * read-write and that would be quite bad (the fd would be made read-write + * too, invalidating the protection). 
+ */ + if (umount2(template, MNT_DETACH) < 0) { + if (ret >= 0) + close(ret); + ret = -ENOTRECOVERABLE; + } + +out: + /* + * We don't care about unlink errors, the worst that happens is that + * there's an empty file left around in STATEDIR. + */ + unlink(template); + return ret; +} + +static int copy_self_proc_exe(char **argv) { + char *exename; + int fd, mmfd, n_read, n_written; + struct stat st; + char buf[2048]; + + fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC); + if (fd == -1) { + fprintf(stderr, "open(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (fstat(fd, &st) == -1) { + fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n"); + close(fd); + return -1; + } + exename = basename(argv[0]); + mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC); + if (mmfd == -1) { + fprintf(stderr, "memfd_create(): %m\n"); + goto close_fd; + } + for (;;) { + n_read = read(fd, buf, sizeof(buf)); + if (n_read < 0) { + fprintf(stderr, "read(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (n_read == 0) { + break; + } + n_written = write(mmfd, buf, n_read); + if (n_written < 0) { + fprintf(stderr, "write(anonfd): %m\n"); + goto close_fd; + } + if (n_written != n_read) { + fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read); + goto close_fd; + } + } + close(fd); + if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) { + fprintf(stderr, "Close_Fd sealing memfd copy: %m\n"); + goto close_mmfd; + } + + return mmfd; + +close_fd: + close(fd); +close_mmfd: + close(mmfd); + return -1; +} +static int containers_reexec(int flags) { + char **argv; + int fd = -1; + + argv = parse_proc_stringlist("/proc/self/cmdline"); + if (argv == NULL) { + return -1; + } + + if (flags & CLONE_NEWNS) + fd = try_bindfd(); + if (fd < 0) + fd = copy_self_proc_exe(argv); + if (fd < 0) + return fd; + + if (fexecve(fd, argv, environ) == -1) { + close(fd); + fprintf(stderr, "Error during reexec(...): %m\n"); + return -1; + } + close(fd); + return 0; +} + +void _containers_unshare(void) +{ + int flags, pidfd, continuefd, n, pgrp, sid, ctty; + char buf[2048]; + + flags = _containers_unshare_parse_envint("_Containers-unshare"); + if (flags == -1) { + return; + } + if ((flags & CLONE_NEWUSER) != 0) { + if (unshare(CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); + _check_proc_sys_file (_max_user_namespaces); + _check_proc_sys_file (_unprivileged_user_namespaces); + _exit(1); + } + } + pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _containers_unshare_parse_envint("_Containers-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); + if (pgrp == 1) { + if (setpgrp() == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _containers_unshare_parse_envint("_Containers-ctty"); + if (ctty != -1) { + if (ioctl(ctty, 
TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } + if ((flags & CLONE_NEWUSER) != 0) { + if (setresgid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresgid(0): %m\n"); + _exit(1); + } + if (setresuid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresuid(0): %m\n"); + _exit(1); + } + } + if ((flags & ~CLONE_NEWUSER) != 0) { + if (unshare(flags & ~CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(...): %m\n"); + _exit(1); + } + } + if (containers_reexec(flags) != 0) { + _exit(1); + } + return; +} + +#endif // !UNSHARE_NO_CODE_AT_ALL diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go new file mode 100644 index 00000000000..53cfeb0ec45 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -0,0 +1,56 @@ +package unshare + +import ( + "fmt" + "os" + "os/user" + "sync" + + "github.com/pkg/errors" + "github.com/syndtr/gocapability/capability" +) + +var ( + homeDirOnce sync.Once + homeDirErr error + homeDir string + + hasCapSysAdminOnce sync.Once + hasCapSysAdminRet bool + hasCapSysAdminErr error +) + +// HomeDir returns the home directory for the current user. +func HomeDir() (string, error) { + homeDirOnce.Do(func() { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) + if err != nil { + homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory") + return + } + homeDir, homeDirErr = usr.HomeDir, nil + return + } + homeDir, homeDirErr = home, nil + }) + return homeDir, homeDirErr +} + +// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. +func HasCapSysAdmin() (bool, error) { + hasCapSysAdminOnce.Do(func() { + currentCaps, err := capability.NewPid2(0) + if err != nil { + hasCapSysAdminErr = err + return + } + if err = currentCaps.Load(); err != nil { + hasCapSysAdminErr = err + return + } + hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) + }) + return hasCapSysAdminRet, hasCapSysAdminErr +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go new file mode 100644 index 00000000000..b3f8099f6a0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -0,0 +1,10 @@ +// +build linux,cgo,!gccgo + +package unshare + +// #cgo CFLAGS: -Wall +// extern void _containers_unshare(void); +// void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go new file mode 100644 index 00000000000..2f95da7d8e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go @@ -0,0 +1,25 @@ +// +build linux,cgo,gccgo + +package unshare + +// #cgo CFLAGS: -Wall -Wextra +// extern void _containers_unshare(void); +// void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" + +// This next bit is straight out of libcontainer. + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. 
+		// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
+		C.init()
+	}
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
new file mode 100644
index 00000000000..6d351ce80a9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -0,0 +1,605 @@
+// +build linux
+
+package unshare
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"os/user"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/syndtr/gocapability/capability"
+)
+
+// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
+// handles setting ID maps and other related settings by triggering
+// initialization code in the child.
+type Cmd struct {
+	*exec.Cmd
+	UnshareFlags               int
+	UseNewuidmap               bool
+	UidMappings                []specs.LinuxIDMapping // nolint: golint
+	UseNewgidmap               bool
+	GidMappings                []specs.LinuxIDMapping // nolint: golint
+	GidMappingsEnableSetgroups bool
+	Setsid                     bool
+	Setpgrp                    bool
+	Ctty                       *os.File
+	OOMScoreAdj                *int
+	Hook                       func(pid int) error
+}
+
+// Command creates a new Cmd which can be customized.
+func Command(args ...string) *Cmd {
+	cmd := reexec.Command(args...)
+	return &Cmd{
+		Cmd: cmd,
+	}
+}
+
+func getRootlessUID() int {
+	uidEnv := getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Geteuid()
+}
+
+func getRootlessGID() int {
+	gidEnv := getenv("_CONTAINERS_ROOTLESS_GID")
+	if gidEnv != "" {
+		u, _ := strconv.Atoi(gidEnv)
+		return u
+	}
+
+	/* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. */
+	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Getegid()
+}
+
+func (c *Cmd) Start() error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	// Set an environment variable to tell the child to synchronize its startup.
+	if c.Env == nil {
+		c.Env = os.Environ()
+	}
+	c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))
+
+	// Populate the env variables that the libpod "rootless" package expects to find.
+	if IsRootless() {
+		c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
+		c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", getRootlessUID()))
+		c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", getRootlessGID()))
+	}
+
+	// Create the pipe for reading the child's PID.
+	pidRead, pidWrite, err := os.Pipe()
+	if err != nil {
+		return errors.Wrapf(err, "error creating pid pipe")
+	}
+	c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
+	c.ExtraFiles = append(c.ExtraFiles, pidWrite)
+
+	// Create the pipe for letting the child know to proceed.
+	continueRead, continueWrite, err := os.Pipe()
+	if err != nil {
+		pidRead.Close()
+		pidWrite.Close()
+		return errors.Wrapf(err, "error creating continue pipe")
+	}
+	c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
+	c.ExtraFiles = append(c.ExtraFiles, continueRead)
+
+	// Pass along other instructions.
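+	// Each of these _Containers-* variables is parsed (and consumed) by the
+	// C constructor in unshare.c before the child's Go runtime starts.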
+	if c.Setsid {
+		c.Env = append(c.Env, "_Containers-setsid=1")
+	}
+	if c.Setpgrp {
+		c.Env = append(c.Env, "_Containers-setpgrp=1")
+	}
+	if c.Ctty != nil {
+		c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
+		c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
+	}
+
+	// Make sure we clean up our pipes.
+	defer func() {
+		if pidRead != nil {
+			pidRead.Close()
+		}
+		if pidWrite != nil {
+			pidWrite.Close()
+		}
+		if continueRead != nil {
+			continueRead.Close()
+		}
+		if continueWrite != nil {
+			continueWrite.Close()
+		}
+	}()
+
+	// Start the new process.
+	err = c.Cmd.Start()
+	if err != nil {
+		return err
+	}
+
+	// Close the ends of the pipes that the parent doesn't need.
+	continueRead.Close()
+	continueRead = nil
+	pidWrite.Close()
+	pidWrite = nil
+
+	// Read the child's PID from the pipe.
+	pidString := ""
+	b := new(bytes.Buffer)
+	if _, err := io.Copy(b, pidRead); err != nil {
+		return errors.Wrapf(err, "Reading child PID")
+	}
+	pidString = b.String()
+	pid, err := strconv.Atoi(pidString)
+	if err != nil {
+		fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
+		return errors.Wrapf(err, "error parsing PID %q", pidString)
+	}
+	pidString = fmt.Sprintf("%d", pid)
+
+	// If we created a new user namespace, set any specified mappings.
+	if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
+		// Always set "setgroups".
+		setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+		if err != nil {
+			fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
+			return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+		}
+		defer setgroups.Close()
+		if c.GidMappingsEnableSetgroups {
+			if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
+				fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
+				return errors.Wrapf(err, "error writing \"allow\" to /proc/%s/setgroups", pidString)
+			}
+		} else {
+			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
+				fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
+				return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
+			}
+		}
+
+		if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
+			uidmap, gidmap, err := GetHostIDMappings("")
+			if err != nil {
+				fmt.Fprintf(continueWrite, "Reading ID mappings in parent: %v", err)
+				return errors.Wrapf(err, "Reading ID mappings in parent")
+			}
+			if len(c.UidMappings) == 0 {
+				c.UidMappings = uidmap
+				for i := range c.UidMappings {
+					c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
+				}
+			}
+			if len(c.GidMappings) == 0 {
+				c.GidMappings = gidmap
+				for i := range c.GidMappings {
+					c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
+				}
+			}
+		}
+
+		if len(c.GidMappings) > 0 {
+			// Build the GID map, since writing to the proc file has to be done all at once.
+			g := new(bytes.Buffer)
+			for _, m := range c.GidMappings {
+				fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
+			}
+			gidmapSet := false
+			// Set the GID map.
+			if c.UseNewgidmap {
+				cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
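+				// newgidmap expects "newgidmap <pid> <cid> <hid> <size> ...",
+				// so the newline-separated triples built above are flattened
+				// into a single argument list; the buffer is then reused below
+				// to capture the command's output.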
+				g.Reset()
+				cmd.Stdout = g
+				cmd.Stderr = g
+				err := cmd.Run()
+				if err == nil {
+					gidmapSet = true
+				} else {
+					logrus.Warnf("Error running newgidmap: %v: %s", err, g.String())
+					logrus.Warnf("Falling back to single mapping")
+					g.Reset()
+					g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
+				}
+			}
+			if !gidmapSet {
+				if c.UseNewgidmap {
+					setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+					if err != nil {
+						fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
+						return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+					}
+					defer setgroups.Close()
+					if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
+						fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
+						return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+					}
+				}
+				gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+				if err != nil {
+					fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
+					return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
+				}
+				defer gidmap.Close()
+				if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
+					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
+					return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
+				}
+			}
+		}
+
+		if len(c.UidMappings) > 0 {
+			// Build the UID map, since writing to the proc file has to be done all at once.
+			u := new(bytes.Buffer)
+			for _, m := range c.UidMappings {
+				fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
+			}
+			uidmapSet := false
+			// Set the UID map.
+			if c.UseNewuidmap {
+				cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
+				u.Reset()
+				cmd.Stdout = u
+				cmd.Stderr = u
+				err := cmd.Run()
+				if err == nil {
+					uidmapSet = true
+				} else {
+					logrus.Warnf("Error running newuidmap: %v: %s", err, u.String())
+					logrus.Warnf("Falling back to single mapping")
+					u.Reset()
+					u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
+				}
+			}
+			if !uidmapSet {
+				uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+				if err != nil {
+					fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
+					return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
+				}
+				defer uidmap.Close()
+				if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
+					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
+					return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
+				}
+			}
+		}
+	}
+
+	if c.OOMScoreAdj != nil {
+		oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+		if err != nil {
+			fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
+			return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
+		}
+		defer oomScoreAdj.Close()
+		if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
+			fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", *c.OOMScoreAdj, err)
+			return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", *c.OOMScoreAdj, pidString)
+		}
+	}
+	// Run any additional setup that we want to do before the child starts running proper.
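+	// (The caller-provided Hook, when set, runs here in the parent with the
+	// child's PID, e.g. to write extra configuration before the child is
+	// allowed to continue.)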
+ if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} + +var ( + isRootlessOnce sync.Once + isRootless bool +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + isRootlessOnce.Do(func() { + isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != "" + }) + return isRootless +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=done") +} + +type Runnable interface { + Run() error +} + +func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname + if err != nil { + if format != "" { + logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) + } else { + logrus.Errorf("%v", err) + } + os.Exit(1) + } +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { + // If we've already been through this once, no need to try again. + if os.Geteuid() == 0 && IsRootless() { + return + } + + var uidNum, gidNum uint64 + // Figure out who we are. + me, err := user.Current() + if !os.IsNotExist(err) { + bailOnError(err, "error determining current user") + uidNum, err = strconv.ParseUint(me.Uid, 10, 32) + bailOnError(err, "error parsing current UID %s", me.Uid) + gidNum, err = strconv.ParseUint(me.Gid, 10, 32) + bailOnError(err, "error parsing current GID %s", me.Gid) + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // ID mappings to use to reexec ourselves. + var uidmap, gidmap []specs.LinuxIDMapping + if uidNum != 0 || evenForRoot { + // Read the set of ID mappings that we're allowed to use. Each + // range in /etc/subuid and /etc/subgid file is a starting host + // ID and a range size. + uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username) + if err != nil { + logrus.Warnf("Reading allowed ID mappings: %v", err) + } + if len(uidmap) == 0 { + logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username) + } + if len(gidmap) == 0 { + logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username) + } + // Map our UID and GID, then the subuid and subgid ranges, + // consecutively, starting at 0, to get the mappings to use for + // a copy of ourselves. + uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...) + gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...) 
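+		// Renumber the container-side IDs so that the combined maps cover one
+		// contiguous range starting at 0: our own ID first, then each sub-ID
+		// range in turn.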
+		var rangeStart uint32
+		for i := range uidmap {
+			uidmap[i].ContainerID = rangeStart
+			rangeStart += uidmap[i].Size
+		}
+		rangeStart = 0
+		for i := range gidmap {
+			gidmap[i].ContainerID = rangeStart
+			rangeStart += gidmap[i].Size
+		}
+	} else {
+		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
+		// to use unshare(), so don't bother creating a new user namespace at this point.
+		capabilities, err := capability.NewPid(0)
+		bailOnError(err, "Reading the current capabilities sets")
+		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
+			return
+		}
+		// Read the set of ID mappings that we're currently using.
+		uidmap, gidmap, err = GetHostIDMappings("")
+		bailOnError(err, "Reading current ID mappings")
+		// Just reuse them.
+		for i := range uidmap {
+			uidmap[i].HostID = uidmap[i].ContainerID
+		}
+		for i := range gidmap {
+			gidmap[i].HostID = gidmap[i].ContainerID
+		}
+	}
+
+	// Unlike most uses of reexec or unshare, we're using a name that
+	// _won't_ be recognized as a registered reexec handler, since we
+	// _want_ to fall through reexec.Init() to the normal main().
+	cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)
+
+	// If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
+	err = os.Setenv(UsernsEnvName, "1")
+	bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)
+
+	// Set the default isolation type to use the "rootless" method.
+	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
+		if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
+			logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+			os.Exit(1)
+		}
+	}
+
+	// Reuse our stdio.
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	// Set up a new user namespace with the ID mapping.
+	cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
+	cmd.UseNewuidmap = uidNum != 0
+	cmd.UidMappings = uidmap
+	cmd.UseNewgidmap = uidNum != 0
+	cmd.GidMappings = gidmap
+	cmd.GidMappingsEnableSetgroups = true
+
+	// Finish up.
+	logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+	ExecRunnable(cmd, nil)
+}
+
+// ExecRunnable runs the specified unshare command, captures its exit status,
+// and exits with the same status.
+func ExecRunnable(cmd Runnable, cleanup func()) {
+	exit := func(status int) {
+		if cleanup != nil {
+			cleanup()
+		}
+		os.Exit(status)
+	}
+	if err := cmd.Run(); err != nil {
+		if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+			if exitError.ProcessState.Exited() {
+				if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+					if waitStatus.Exited() {
+						logrus.Errorf("%v", exitError)
+						exit(waitStatus.ExitStatus())
+					}
+					if waitStatus.Signaled() {
+						logrus.Errorf("%v", exitError)
+						exit(int(waitStatus.Signal()) + 128)
+					}
+				}
+			}
+		}
+		logrus.Errorf("%v", err)
+		logrus.Errorf("(Unable to determine exit status)")
+		exit(1)
+	}
+	exit(0)
+}
+
+// getHostIDMappings reads mappings from the named node under /proc.
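+// Each line of such a node holds three decimal fields: a container ID, a host
+// ID, and a range size, e.g. "0 100000 65536".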
+func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { + var mappings []specs.LinuxIDMapping + f, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "Reading ID mappings from %q", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 3 { + return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + } + cid, err := strconv.ParseUint(fields[0], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path) + } + hid, err := strconv.ParseUint(fields[1], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path) + } + size, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path) + } + mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) + } + return mappings, nil +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + if pid == "" { + pid = "self" + } + uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) + if err != nil { + return nil, nil, err + } + gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) + if err != nil { + return nil, nil, err + } + return uidmap, gidmap, nil +} + +// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. +func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + mappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, errors.Wrapf(err, "Reading subuid mappings for user %q and subgid mappings for group %q", user, group) + } + var uidmap, gidmap []specs.LinuxIDMapping + for _, m := range mappings.UIDs() { + uidmap = append(uidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + for _, m := range mappings.GIDs() { + gidmap = append(gidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap, nil +} + +// ParseIDMappings parses mapping triples. 
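+// Each triple has the form "container-id:host-id:size", e.g. (illustrative)
+// []string{"0:1000:1", "1:100000:65536"}.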
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	return uid, gid, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
new file mode 100644
index 00000000000..bf4d567b8af
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -0,0 +1,45 @@
+// +build !linux
+
+package unshare
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+	// UsernsEnvName is the environment variable that, when set, indicates rootless mode
+	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode
+func IsRootless() bool {
+	return false
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS
+func GetRootlessUID() int {
+	return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for the rootless containers
+func RootlessEnv() []string {
+	return append(os.Environ(), UsernsEnvName+"=")
+}
+
+// MaybeReexecUsingUserNamespace re-execs the process in a new namespace
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+}
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+	return nil, nil, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
new file mode 100644
index 00000000000..d5f2d22a801
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -0,0 +1,10 @@
+// +build !linux,cgo
+
+package unshare
+
+// Go refuses to compile a subpackage with CGO_ENABLED=1 if there is a *.c file but no 'import "C"'.
+// OTOH if we did have an 'import "C"', the Linux-only code would fail to compile.
+// So, satisfy the Go compiler by using import "C" but #ifdef-ing out all of the code.
+
+// #cgo CPPFLAGS: -DUNSHARE_NO_CODE_AT_ALL
+import "C"
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
new file mode 100644
index 00000000000..c17dd6d37ea
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -0,0 +1,210 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# container/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/containers/storage.conf
+# /etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver. Must be set for proper operation.
+driver = "overlay"
+
+# Temporary storage location
+runroot = "/run/containers/storage"
+
+# Primary Read/Write location of container storage
+# When changing the graphroot location on an SELINUX system, you must
+# ensure the labeling matches the default locations labels with the
+# following commands:
+# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH
+# restorecon -R -v /NEWSTORAGEPATH
+graphroot = "/var/lib/containers/storage"
+
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be a comma-separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to automatically create a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image even those with multiple uids. Note multiple UIDs will be
+# squashed down to the default uid in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies a comma-separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with network-based homedirs.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing containers storage
+# with other users. For instance, have a storage owned by root but shared
+# to rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other OCTAL Permissions.
+#
+# Note: The force_mask Flag is an experimental feature; it could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes device even if device already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the min free space percent in a thin pool required for
+# new device creation to succeed.
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes the device even if it already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the minimum free space percent in a thin pool required
+# for new device creation to succeed. Valid values are from 0% to 99%. A value
+# of 0% disables the check.
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` options when
+# creating thin devices. Default is 128k.
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks the devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note that this does not free
+# up disk space; use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks the thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete the device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to clean up the next time the driver is used. Deferred deletion permanently
+# deletes the device, and all data stored in the device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when an ENOSPC (no space) error is returned by the
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
new file mode 100644
index 00000000000..225015e4382
--- /dev/null
+++ b/vendor/github.com/containers/storage/store.go
@@ -0,0 +1,3753 @@
+package storage
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	// register all of the built-in drivers
+	_ "github.com/containers/storage/drivers/register"
+
+	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/stringid"
+	"github.com/containers/storage/pkg/stringutils"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/types"
+	"github.com/hashicorp/go-multierror"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+)
+
+type updateNameOperation int
+
+const (
+	setNames updateNameOperation = iota
+	addNames
+	removeNames
+)
+
+var (
+	stores     []*store
+	storesLock sync.Mutex
+)
+
+// ROFileBasedStore wraps up the methods of the various types of file-based
+// data stores that we implement which are needed for both read-only and
+// read-write files.
+type ROFileBasedStore interface {
+	Locker
+
+	// Load reloads the contents of the store from disk. It should be called
+	// with the lock held.
+	Load() error
+
+	// ReloadIfChanged reloads the contents of the store from disk if it has changed.
+	ReloadIfChanged() error
+}
+
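Every accessor later in this file follows the same read-side discipline built from these methods: take the lock, call ReloadIfChanged to pick up writes made by other processes, and only then read. A minimal sketch of that pattern (illustrative; roStore and withReloaded are hypothetical helpers written as if inside this package, where the real ROFileBasedStore/Locker interfaces provide these methods):

    // roStore captures the subset of ROFileBasedStore/Locker that the
    // read pattern needs.
    type roStore interface {
        RLock()
        Unlock()
        ReloadIfChanged() error
    }

    // withReloaded takes the read lock, refreshes in-memory state if the
    // backing file changed on disk, then runs the read callback.
    func withReloaded(s roStore, read func() error) error {
        s.RLock()
        defer s.Unlock()
        if err := s.ReloadIfChanged(); err != nil {
            return err
        }
        return read()
    }
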
+// RWFileBasedStore wraps up the methods of various types of file-based data
+// stores that we implement using read-write files.
+type RWFileBasedStore interface {
+	// Save saves the contents of the store to disk. It should be called with
+	// the lock held, and Touch() should be called afterward before releasing the
+	// lock.
+	Save() error
+}
+
+// FileBasedStore wraps up the common methods of various types of file-based
+// data stores that we implement.
+type FileBasedStore interface {
+	ROFileBasedStore
+	RWFileBasedStore
+}
+
+// ROMetadataStore wraps a method for reading metadata associated with an ID.
+type ROMetadataStore interface {
+	// Metadata reads metadata associated with an item with the specified ID.
+	Metadata(id string) (string, error)
+}
+
+// RWMetadataStore wraps a method for setting metadata associated with an ID.
+type RWMetadataStore interface {
+	// SetMetadata updates the metadata associated with the item with the specified ID.
+	SetMetadata(id, metadata string) error
+}
+
+// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
+type MetadataStore interface {
+	ROMetadataStore
+	RWMetadataStore
+}
+
+// An ROBigDataStore wraps up the read-only big-data related methods of the
+// various types of file-based lookaside stores that we implement.
+type ROBigDataStore interface {
+	// BigData retrieves a (potentially large) piece of data associated with
+	// this ID, if it has previously been set.
+	BigData(id, key string) ([]byte, error)
+
+	// BigDataSize retrieves the size of a (potentially large) piece of
+	// data associated with this ID, if it has previously been set.
+	BigDataSize(id, key string) (int64, error)
+
+	// BigDataDigest retrieves the digest of a (potentially large) piece of
+	// data associated with this ID, if it has previously been set.
+	BigDataDigest(id, key string) (digest.Digest, error)
+
+	// BigDataNames returns a list of the names of previously-stored pieces of
+	// data.
+	BigDataNames(id string) ([]string, error)
+}
+
+// A RWImageBigDataStore wraps up how we store big-data associated with images.
+type RWImageBigDataStore interface {
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	// Pass github.com/containers/image/manifest.Digest as digestManifest
+	// to allow ByDigest to find images by their correct digests.
+	SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
+}
+
+// A ContainerBigDataStore wraps up how we store big-data associated with containers.
+type ContainerBigDataStore interface {
+	ROBigDataStore
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	SetBigData(id, key string, data []byte) error
+}
+
+// A ROLayerBigDataStore wraps up how we store RO big-data associated with layers.
+type ROLayerBigDataStore interface {
+	// BigData retrieves a (potentially large) piece of data associated with
+	// this ID, if it has previously been set.
+	BigData(id, key string) (io.ReadCloser, error)
+
+	// BigDataNames returns a list of the names of previously-stored pieces of
+	// data.
+	BigDataNames(id string) ([]string, error)
+}
+
+// A RWLayerBigDataStore wraps up how we store big-data associated with layers.
+type RWLayerBigDataStore interface {
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	SetBigData(id, key string, data io.Reader) error
+}
+
+// A LayerBigDataStore wraps up how we store big-data associated with layers.
+type LayerBigDataStore interface {
+	ROLayerBigDataStore
+	RWLayerBigDataStore
+}
+
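The digestManifest callback above is how the image store learns a manifest's digest without this package depending on containers/image directly. A sketch of a typical caller (illustrative; storeManifest and the "manifest" key are assumptions, the Store interface is defined below in this file, and the import path is the containers/image/v5 location of the manifest package named in the comment):

    import "github.com/containers/image/v5/manifest"

    // storeManifest records an image manifest as big data, passing
    // manifest.Digest so the image can later be looked up by digest.
    func storeManifest(s Store, imageID string, raw []byte) error {
        return s.SetImageBigData(imageID, "manifest", raw, manifest.Digest)
    }
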
+// A FlaggableStore can have flags set and cleared on items which it manages.
+type FlaggableStore interface {
+	// ClearFlag removes a named flag from an item in the store.
+	ClearFlag(id string, flag string) error
+
+	// SetFlag sets a named flag and its value on an item in the store.
+	SetFlag(id string, flag string, value interface{}) error
+}
+
+type StoreOptions = types.StoreOptions
+
+// Store wraps up the various types of file-based stores that we use into a
+// singleton object that initializes and manages them all together.
+type Store interface {
+	// RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
+	// settings that were passed to GetStore() when the object was created.
+	RunRoot() string
+	GraphRoot() string
+	GraphDriverName() string
+	GraphOptions() []string
+	UIDMap() []idtools.IDMap
+	GIDMap() []idtools.IDMap
+
+	// GraphDriver obtains and returns a handle to the graph Driver object used
+	// by the Store.
+	GraphDriver() (drivers.Driver, error)
+
+	// CreateLayer creates a new layer in the underlying storage driver,
+	// optionally having the specified ID (one will be assigned if none is
+	// specified), with the specified layer (or no layer) as its parent,
+	// and with optional names. (The writeable flag is ignored.)
+	CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error)
+
+	// PutLayer combines the functions of CreateLayer and ApplyDiff,
+	// marking the layer for automatic removal if applying the diff fails
+	// for any reason.
+	//
+	// Note that we do some of this work in a child process. The calling
+	// process's main() function needs to import our pkg/reexec package and
+	// should begin with something like this in order to allow us to
+	// properly start that child process:
+	//   if reexec.Init() {
+	//       return
+	//   }
+	PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error)
+
+	// CreateImage creates a new image, optionally with the specified ID
+	// (one will be assigned if none is specified), with optional names,
+	// referring to a specified layer, and with optional metadata. An
+	// image is a record which associates the ID of a layer with
+	// additional bookkeeping information which the library stores for the
+	// convenience of its caller.
+	CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
+
+	// CreateContainer creates a new container, optionally with the
+	// specified ID (one will be assigned if none is specified), with
+	// optional names, using the specified image's top layer as the basis
+	// for the container's layer, and assigning the specified ID to that
+	// layer (one will be created if none is specified). A container is a
+	// layer which is associated with additional bookkeeping information
+	// which the library stores for the convenience of its caller.
+	CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+
+	// Metadata retrieves the metadata which is associated with a layer,
+	// image, or container (whichever the passed-in ID refers to).
+	Metadata(id string) (string, error)
+
+	// SetMetadata updates the metadata which is associated with a layer,
+	// image, or container (whichever the passed-in ID refers to) to match
+	// the specified value. The metadata value can be retrieved at any
+	// time using Metadata, or using Layer, Image, or Container and reading
+	// the object directly.
+	SetMetadata(id, metadata string) error
+
+	// Exists checks if there is a layer, image, or container which has the
+	// passed-in ID or name.
+	Exists(id string) bool
+
+	// Status asks for a status report, in the form of key-value pairs,
+	// from the underlying storage driver. The contents vary from driver
+	// to driver.
+	Status() ([][2]string, error)
+
+	// Delete removes the layer, image, or container which has the
+	// passed-in ID or name. Note that no safety checks are performed, so
+	// this can leave images with references to layers which do not exist,
+	// and layers with references to parents which no longer exist.
+	Delete(id string) error
+
+	// DeleteLayer attempts to remove the specified layer. If the layer is the
+	// parent of any other layer, or is referred to by any images, it will return
+	// an error.
+	DeleteLayer(id string) error
+
+	// DeleteImage removes the specified image if it is not referred to by
+	// any containers. If its top layer is then no longer referred to by
+	// any other images and is not the parent of any other layers, its top
+	// layer will be removed. If that layer's parent is no longer referred
+	// to by any other images and is not the parent of any other layers,
+	// then it, too, will be removed. This procedure will be repeated
+	// until a layer which should not be removed, or the base layer, is
+	// reached, at which point the list of removed layers is returned. If
+	// the commit argument is false, the image and layers are not removed,
+	// but the list of layers which would be removed is still returned.
+	DeleteImage(id string, commit bool) (layers []string, err error)
+
+	// DeleteContainer removes the specified container and its layer. If
+	// there is no matching container, or if the container exists but its
+	// layer does not, an error will be returned.
+	DeleteContainer(id string) error
+
+	// Wipe removes all known layers, images, and containers.
+	Wipe() error
+
+	// MountImage mounts an image in a temporary directory and returns the
+	// mount point. MountImage allows the caller to mount an image. Images
+	// will always be mounted read-only.
+	MountImage(id string, mountOptions []string, mountLabel string) (string, error)
+
+	// UnmountImage attempts to unmount an image, given an ID.
+	// Returns whether or not the image is still mounted.
+	UnmountImage(id string, force bool) (bool, error)
+
+	// Mount attempts to mount a layer, image, or container for access, and
+	// returns the pathname if it succeeds.
+	// Note if the mountLabel == "", the default label for the container
+	// will be used.
+	//
+	// Note that we do some of this work in a child process. The calling
+	// process's main() function needs to import our pkg/reexec package and
+	// should begin with something like this in order to allow us to
+	// properly start that child process:
+	//   if reexec.Init() {
+	//       return
+	//   }
+	Mount(id, mountLabel string) (string, error)
+
+	// Unmount attempts to unmount a layer, image, or container, given an ID, a
+	// name, or a mount path. Returns whether or not the layer is still mounted.
+	Unmount(id string, force bool) (bool, error)
+
+	// Mounted returns the number of times the layer has been mounted.
+	Mounted(id string) (int, error)
+
+	// Changes returns a summary of the changes which would need to be made
+	// to one layer to make its contents the same as a second layer. If
+	// the first layer is not specified, the second layer's parent is
+	// assumed. Each Change structure contains a Path relative to the
+	// layer's root directory, and a Kind which is either ChangeAdd,
+	// ChangeModify, or ChangeDelete.
+	Changes(from, to string) ([]archive.Change, error)
+
+	// DiffSize returns a count of the size of the tarstream which would
+	// specify the changes returned by Changes.
+	DiffSize(from, to string) (int64, error)
+
+	// Diff returns the tarstream which would specify the changes returned
+	// by Changes. If options are passed in, they can override default
+	// behaviors.
+	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+
+	// ApplyDiff applies a tarstream to a layer. Information about the
+	// tarstream is cached with the layer. Typically, a layer which is
+	// populated using a tarstream will be expected to not be modified in
+	// any other way, either before or after the diff is applied.
+	//
+	// Note that we do some of this work in a child process. The calling
+	// process's main() function needs to import our pkg/reexec package and
+	// should begin with something like this in order to allow us to
+	// properly start that child process:
+	//   if reexec.Init() {
+	//       return
+	//   }
+	ApplyDiff(to string, diff io.Reader) (int64, error)
+
+	// ApplyDiffWithDiffer applies a diff to a layer using the provided differ.
+	// It is the caller's responsibility to clean up the staging directory if it is not
+	// successfully applied with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
+	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+
+	// CleanupStagingDirectory cleans up the staging directory. It can be used to
+	// clean up the staging directory on errors.
+	CleanupStagingDirectory(stagingDirectory string) error
+
+	// DifferTarget gets the path to the differ target.
+	DifferTarget(id string) (string, error)
+
+	// LayersByCompressedDigest returns a slice of the layers with the
+	// specified compressed digest value recorded for them.
+	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// LayersByUncompressedDigest returns a slice of the layers with the
+	// specified uncompressed digest value recorded for them.
+	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// LayerSize returns a cached approximation of the layer's size, or -1
+	// if we don't have a value on hand.
+	LayerSize(id string) (int64, error)
+
+	// LayerParentOwners returns the UIDs and GIDs of owners of parents of
+	// the layer's mountpoint for which the layer's UID and GID maps (if
+	// any are defined) don't contain corresponding IDs.
+	LayerParentOwners(id string) ([]int, []int, error)
+
+	// Layers returns a list of the currently known layers.
+	Layers() ([]Layer, error)
+
+	// Images returns a list of the currently known images.
+	Images() ([]Image, error)
+
+	// Containers returns a list of the currently known containers.
+	Containers() ([]Container, error)
+
+	// Names returns the list of names for a layer, image, or container.
+	Names(id string) ([]string, error)
+
+	// Free removes the store from the list of stores
+	Free()
+
+	// SetNames changes the list of names for a layer, image, or container.
+	// Duplicate names are removed from the list automatically.
+	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
+	SetNames(id string, names []string) error
+
+	// AddNames adds the list of names for a layer, image, or container.
+	// Duplicate names are removed from the list automatically.
+	AddNames(id string, names []string) error
+
+	// RemoveNames removes the list of names for a layer, image, or container.
+	// Duplicate names are removed from the list automatically.
+	RemoveNames(id string, names []string) error
+
+	// ListImageBigData retrieves a list of the (possibly large) chunks of
+	// named data associated with an image.
+	ListImageBigData(id string) ([]string, error)
+
+	// ImageBigData retrieves a (possibly large) chunk of named data
+	// associated with an image.
+	ImageBigData(id, key string) ([]byte, error)
+
+	// ImageBigDataSize retrieves the size of a (possibly large) chunk
+	// of named data associated with an image.
+	ImageBigDataSize(id, key string) (int64, error)
+
+	// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
+	// of named data associated with an image.
+	ImageBigDataDigest(id, key string) (digest.Digest, error)
+
+	// SetImageBigData stores a (possibly large) chunk of named data
+	// associated with an image. Pass
+	// github.com/containers/image/manifest.Digest as digestManifest to
+	// allow ImagesByDigest to find images by their correct digests.
+	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
+
+	// ListLayerBigData retrieves a list of the (possibly large) chunks of
+	// named data associated with a layer.
+	ListLayerBigData(id string) ([]string, error)
+
+	// LayerBigData retrieves a (possibly large) chunk of named data
+	// associated with a layer.
+	LayerBigData(id, key string) (io.ReadCloser, error)
+
+	// SetLayerBigData stores a (possibly large) chunk of named data
+	// associated with a layer.
+	SetLayerBigData(id, key string, data io.Reader) error
+
+	// ImageSize computes the size of the image's layers and ancillary data.
+	ImageSize(id string) (int64, error)
+
+	// ListContainerBigData retrieves a list of the (possibly large) chunks of
+	// named data associated with a container.
+	ListContainerBigData(id string) ([]string, error)
+
+	// ContainerBigData retrieves a (possibly large) chunk of named data
+	// associated with a container.
+	ContainerBigData(id, key string) ([]byte, error)
+
+	// ContainerBigDataSize retrieves the size of a (possibly large)
+	// chunk of named data associated with a container.
+	ContainerBigDataSize(id, key string) (int64, error)
+
+	// ContainerBigDataDigest retrieves the digest of a (possibly large)
+	// chunk of named data associated with a container.
+	ContainerBigDataDigest(id, key string) (digest.Digest, error)
+
+	// SetContainerBigData stores a (possibly large) chunk of named data
+	// associated with a container.
+	SetContainerBigData(id, key string, data []byte) error
+
+	// ContainerSize computes the size of the container's layer and ancillary
+	// data. Warning: this is a potentially expensive operation.
+	ContainerSize(id string) (int64, error)
+
+	// Layer returns a specific layer.
+	Layer(id string) (*Layer, error)
+
+	// Image returns a specific image.
+	Image(id string) (*Image, error)
+
+	// ImagesByTopLayer returns a list of images which reference the specified
+	// layer as their top layer. They will have different IDs and names
+	// and may have different metadata, big data items, and flags.
+	ImagesByTopLayer(id string) ([]*Image, error)
+
+	// ImagesByDigest returns a list of images which contain a big data item
+	// named ImageDigestBigDataKey whose contents have the specified digest.
+	ImagesByDigest(d digest.Digest) ([]*Image, error)
+
+	// Container returns a specific container.
+	Container(id string) (*Container, error)
+
+	// ContainerByLayer returns a specific container based on its layer ID or
+	// name.
+	ContainerByLayer(id string) (*Container, error)
+
+	// ContainerDirectory returns a path of a directory which the caller
+	// can use to store data, specific to the container, which the library
+	// does not directly manage. The directory will be deleted when the
+	// container is deleted.
+	ContainerDirectory(id string) (string, error)
+
+	// SetContainerDirectoryFile is a convenience function which stores
+	// a piece of data in the specified file relative to the container's
+	// directory.
+	SetContainerDirectoryFile(id, file string, data []byte) error
+
+	// FromContainerDirectory is a convenience function which reads
+	// the contents of the specified file relative to the container's
+	// directory.
+	FromContainerDirectory(id, file string) ([]byte, error)
+
+	// ContainerRunDirectory returns a path of a directory which the
+	// caller can use to store data, specific to the container, which the
+	// library does not directly manage. The directory will be deleted
+	// when the host system is restarted.
+	ContainerRunDirectory(id string) (string, error)
+
+	// SetContainerRunDirectoryFile is a convenience function which stores
+	// a piece of data in the specified file relative to the container's
+	// run directory.
+	SetContainerRunDirectoryFile(id, file string, data []byte) error
+
+	// FromContainerRunDirectory is a convenience function which reads
+	// the contents of the specified file relative to the container's run
+	// directory.
+	FromContainerRunDirectory(id, file string) ([]byte, error)
+
+	// ContainerParentOwners returns the UIDs and GIDs of owners of parents
+	// of the container's layer's mountpoint for which the layer's UID and
+	// GID maps (if any are defined) don't contain corresponding IDs.
+	ContainerParentOwners(id string) ([]int, []int, error)
+
+	// Lookup returns the ID of a layer, image, or container with the specified
+	// name or ID.
+	Lookup(name string) (string, error)
+
+	// Shutdown attempts to free any kernel resources which are being used
+	// by the underlying driver. If "force" is true, any mounted (i.e., in
+	// use) layers are unmounted beforehand. If "force" is not true, then
+	// layers being in use is considered to be an error condition. A list
+	// of still-mounted layers is returned along with possible errors.
+	Shutdown(force bool) (layers []string, err error)
+
+	// Version returns version information, in the form of key-value pairs, from
+	// the storage package.
+	Version() ([][2]string, error)
+
+	// GetDigestLock returns a digest-specific Locker.
+	GetDigestLock(digest.Digest) (Locker, error)
+
+	// LookupAdditionalLayer searches for a layer in the additional layer store and
+	// returns an object for handling it. Note that the layer hasn't been stored to
+	// this store yet, so that needs to be done through the PutAs method.
+	// Releasing the AdditionalLayer handler is the caller's responsibility.
+	// This API is experimental and can be changed without bumping the major version number.
+	LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+}
+
+// AdditionalLayer represents a layer that is contained in the additional layer store.
+// This API is experimental and can be changed without bumping the major version number.
+type AdditionalLayer interface {
+	// PutAs creates a layer based on this handler, using diff contents from the additional
+	// layer store.
+	PutAs(id, parent string, names []string) (*Layer, error)
+
+	// UncompressedDigest returns the uncompressed digest of this layer
+	UncompressedDigest() digest.Digest
+
+	// CompressedSize returns the compressed size of this layer
+	CompressedSize() int64
+
+	// Release tells the additional layer store that we don't use this handler.
+	Release()
+}
+
+type AutoUserNsOptions = types.AutoUserNsOptions
+
+type IDMappingOptions = types.IDMappingOptions
+
+// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
+type LayerOptions struct {
+	// IDMappingOptions specifies the type of ID mapping which should be
+	// used for this layer. If nothing is specified, the layer will
+	// inherit settings from its parent layer or, if it has no parent
+	// layer, the Store object.
+	types.IDMappingOptions
+	// TemplateLayer is the ID of a layer whose contents will be used to
+	// initialize this layer. If set, it should be a child of the layer
+	// which we want to use as the parent of the new layer.
+	TemplateLayer string
+	// OriginalDigest specifies a digest of the tarstream (diff), if one is
+	// provided along with these LayerOptions, and reliably known by the caller.
+	// Use the default "" if this field is not applicable or the value is not known.
+	OriginalDigest digest.Digest
+	// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
+	// of the tarstream (diff), if one is provided along with these LayerOptions,
+	// and reliably known by the caller.
+	// Use the default "" if this field is not applicable or the value is not known.
+	UncompressedDigest digest.Digest
+}
+
+// ImageOptions is used for passing options to a Store's CreateImage() method.
+type ImageOptions struct {
+	// CreationDate, if not zero, will override the default behavior of marking the image as having been
+	// created when CreateImage() was called, recording CreationDate instead.
+	CreationDate time.Time
+	// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
+	Digest digest.Digest
+}
+
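Taken together, CreateImage and ImageOptions let a caller build a reproducible image record on top of an existing layer. A minimal sketch (illustrative; createPinnedImage is a hypothetical helper written as if inside this package, and the zero Unix time is an arbitrary choice for reproducibility):

    // createPinnedImage records an image for layerID with a fixed creation
    // time and an optional lookup digest.
    func createPinnedImage(s Store, layerID string, d digest.Digest) (*Image, error) {
        opts := &ImageOptions{
            CreationDate: time.Unix(0, 0).UTC(), // fixed, reproducible timestamp
            Digest:       d,                     // optional lookup digest
        }
        // An empty ID asks the store to assign one; nil names are allowed.
        return s.CreateImage("", nil, layerID, "", opts)
    }
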
+// ContainerOptions is used for passing options to a Store's CreateContainer() method.
+type ContainerOptions struct {
+	// IDMappingOptions specifies the type of ID mapping which should be
+	// used for this container's layer. If nothing is specified, the
+	// container's layer will inherit settings from the image's top layer
+	// or, if it is not being created based on an image, the Store object.
+	types.IDMappingOptions
+	LabelOpts  []string
+	Flags      map[string]interface{}
+	MountOpts  []string
+	Volatile   bool
+	StorageOpt map[string]string
+}
+
+type store struct {
+	lastLoaded      time.Time
+	runRoot         string
+	graphLock       Locker
+	usernsLock      Locker
+	graphRoot       string
+	graphDriverName string
+	graphOptions    []string
+	uidMap          []idtools.IDMap
+	gidMap          []idtools.IDMap
+	autoUsernsUser  string
+	additionalUIDs  *idSet // Set by getAvailableIDs()
+	additionalGIDs  *idSet // Set by getAvailableIDs()
+	autoNsMinSize   uint32
+	autoNsMaxSize   uint32
+	graphDriver     drivers.Driver
+	layerStore      LayerStore
+	roLayerStores   []ROLayerStore
+	imageStore      ImageStore
+	roImageStores   []ROImageStore
+	containerStore  ContainerStore
+	digestLockRoot  string
+	disableVolatile bool
+}
+
+// GetStore attempts to find an already-created Store object matching the
+// specified location and graph driver, and if it can't, it creates and
+// initializes a new Store object, and the underlying storage that it controls.
+//
+// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
+//
+// These defaults observe environment variables:
+//  * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+//  * `STORAGE_OPTS` for the string of options to pass to the driver
+//
+// Note that we do some of this work in a child process. The calling process's
+// main() function needs to import our pkg/reexec package and should begin with
+// something like this in order to allow us to properly start that child
+// process:
+//   if reexec.Init() {
+//       return
+//   }
+func GetStore(options types.StoreOptions) (Store, error) {
+	if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
+		options = types.Options()
+	}
+
+	if options.GraphRoot != "" {
+		dir, err := filepath.Abs(options.GraphRoot)
+		if err != nil {
+			return nil, err
+		}
+		options.GraphRoot = dir
+	}
+	if options.RunRoot != "" {
+		dir, err := filepath.Abs(options.RunRoot)
+		if err != nil {
+			return nil, err
+		}
+		options.RunRoot = dir
+	}
+
+	storesLock.Lock()
+	defer storesLock.Unlock()
+
+	// return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first
+	for _, s := range stores {
+		if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+			return s, nil
+		}
+	}
+
+	// if passed a run-root or graph-root alone, the other should be defaulted; only error if we have neither.
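+	// For example: a caller that passes only GraphRoot falls through to the
+	// switch below and has RunRoot filled in from types.Options(), while a
+	// caller that sets neither gets ErrIncompleteOptions.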
+ switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + case options.GraphRoot == "": + options.GraphRoot = types.Options().GraphRoot + case options.RunRoot == "": + options.RunRoot = types.Options().RunRoot + } + + if err := os.MkdirAll(options.RunRoot, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(options.GraphRoot, 0700); err != nil { + return nil, err + } + for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { + if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil { + return nil, err + } + } + + graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) + if err != nil { + return nil, err + } + + usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock")) + if err != nil { + return nil, err + } + + autoNsMinSize := options.AutoNsMinSize + autoNsMaxSize := options.AutoNsMaxSize + if autoNsMinSize == 0 { + autoNsMinSize = AutoUserNsMinSize + } + if autoNsMaxSize == 0 { + autoNsMaxSize = AutoUserNsMaxSize + } + s := &store{ + runRoot: options.RunRoot, + graphLock: graphLock, + graphRoot: options.GraphRoot, + graphDriverName: options.GraphDriverName, + graphOptions: options.GraphDriverOptions, + uidMap: copyIDMap(options.UIDMap), + gidMap: copyIDMap(options.GIDMap), + autoUsernsUser: options.RootAutoNsUser, + autoNsMinSize: autoNsMinSize, + autoNsMaxSize: autoNsMaxSize, + additionalUIDs: nil, + additionalGIDs: nil, + usernsLock: usernsLock, + disableVolatile: options.DisableVolatile, + } + if err := s.load(); err != nil { + return nil, err + } + + stores = append(stores, s) + + return s, nil +} + +func copyUint32Slice(slice []uint32) []uint32 { + m := []uint32{} + if slice != nil { + m = make([]uint32, len(slice)) + copy(m, slice) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { + m := []idtools.IDMap{} + if idmap != nil { + m = make([]idtools.IDMap, len(idmap)) + copy(m, idmap) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func (s *store) RunRoot() string { + return s.runRoot +} + +func (s *store) GraphDriverName() string { + return s.graphDriverName +} + +func (s *store) GraphRoot() string { + return s.graphRoot +} + +func (s *store) GraphOptions() []string { + return s.graphOptions +} + +func (s *store) UIDMap() []idtools.IDMap { + return copyIDMap(s.uidMap) +} + +func (s *store) GIDMap() []idtools.IDMap { + return copyIDMap(s.gidMap) +} + +func (s *store) load() error { + driver, err := s.GraphDriver() + if err != nil { + return err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + driverPrefix := s.graphDriverName + "-" + + gipath := filepath.Join(s.graphRoot, driverPrefix+"images") + if err := os.MkdirAll(gipath, 0700); err != nil { + return err + } + ris, err := newImageStore(gipath) + if err != nil { + return err + } + s.imageStore = ris + if _, err := s.ROImageStores(); err != nil { + return err + } + + gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return err + } + rcs, err := newContainerStore(gcpath) + if err != nil { + return err + } + rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return err + } + s.containerStore = rcs + + for _, store := range driver.AdditionalImageStores() { + gipath := filepath.Join(store, 
driverPrefix+"images") + ris, err := newROImageStore(gipath) + if err != nil { + return err + } + s.roImageStores = append(s.roImageStores, ris) + } + + s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") + if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { + return err + } + + return nil +} + +// GetDigestLock returns a digest-specific Locker. +func (s *store) GetDigestLock(d digest.Digest) (Locker, error) { + return GetLockfile(filepath.Join(s.digestLockRoot, d.String())) +} + +func (s *store) getGraphDriver() (drivers.Driver, error) { + if s.graphDriver != nil { + return s.graphDriver, nil + } + config := drivers.Options{ + Root: s.graphRoot, + RunRoot: s.runRoot, + DriverOptions: s.graphOptions, + UIDMaps: s.uidMap, + GIDMaps: s.gidMap, + } + driver, err := drivers.New(s.graphDriverName, config) + if err != nil { + return nil, err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + return driver, nil +} + +func (s *store) GraphDriver() (drivers.Driver, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + return s.getGraphDriver() +} + +// LayerStore obtains and returns a handle to the writeable layer store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) LayerStore() (LayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + if s.layerStore != nil { + return s.layerStore, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0700); err != nil { + return nil, err + } + rls, err := s.newLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.layerStore = rls + return s.layerStore, nil +} + +// ROLayerStores obtains additional read/only layer store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not part of the exported Store interface. +func (s *store) ROLayerStores() ([]ROLayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.roLayerStores != nil { + return s.roLayerStores, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + for _, store := range driver.AdditionalImageStores() { + glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.roLayerStores = append(s.roLayerStores, rls) + } + return s.roLayerStores, nil +} + +// ImageStore obtains and returns a handle to the writable image store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. 
+func (s *store) ImageStore() (ImageStore, error) { + if s.imageStore != nil { + return s.imageStore, nil + } + return nil, ErrLoadError +} + +// ROImageStores obtains additional read/only image store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ROImageStores() ([]ROImageStore, error) { + if s.imageStore == nil { + return nil, ErrLoadError + } + + return s.roImageStores, nil +} + +// ContainerStore obtains and returns a handle to the container store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ContainerStore() (ContainerStore, error) { + if s.containerStore != nil { + return s.containerStore, nil + } + return nil, ErrLoadError +} + +func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { + if s.graphDriver == nil || !s.graphDriver.SupportsShifting() { + return false + } + if uidmap != nil && !idtools.IsContiguous(uidmap) { + return false + } + if gidmap != nil && !idtools.IsContiguous(gidmap) { + return false + } + return true +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) { + var parentLayer *Layer + rlstore, err := s.LayerStore() + if err != nil { + return nil, -1, err + } + rlstores, err := s.ROLayerStores() + if err != nil { + return nil, -1, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, -1, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, -1, err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, -1, err + } + if id == "" { + id = stringid.GenerateRandomID() + } + if options == nil { + options = &LayerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + uidMap := options.UIDMap + gidMap := options.GIDMap + if parent != "" { + var ilayer *Layer + for _, l := range append([]ROLayerStore{rlstore}, rlstores...) 
{ + lstore := l + if lstore != rlstore { + lstore.RLock() + defer lstore.Unlock() + if err := lstore.ReloadIfChanged(); err != nil { + return nil, -1, err + } + } + if l, err := lstore.Get(parent); err == nil && l != nil { + ilayer = l + parent = ilayer.ID + break + } + } + if ilayer == nil { + return nil, -1, ErrLayerUnknown + } + parentLayer = ilayer + containers, err := rcstore.Containers() + if err != nil { + return nil, -1, err + } + for _, container := range containers { + if container.LayerID == parent { + return nil, -1, ErrParentIsContainer + } + } + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } else { + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } + } + layerOptions := LayerOptions{ + OriginalDigest: options.OriginalDigest, + UncompressedDigest: options.UncompressedDigest, + } + if s.canUseShifting(uidMap, gidMap) { + layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} + } else { + layerOptions.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + } + } + return rlstore.Put(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, nil, diff) +} + +func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { + layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil) + return layer, err +} + +func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { + if id == "" { + id = stringid.GenerateRandomID() + } + + if layer != "" { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + var ilayer *Layer + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + if store == lstore { + store.Lock() + } else { + store.RLock() + } + defer store.Unlock() + err := store.ReloadIfChanged() + if err != nil { + return nil, err + } + ilayer, err = store.Get(layer) + if err == nil { + break + } + } + if ilayer == nil { + return nil, ErrLayerUnknown + } + layer = ilayer.ID + } + + ristore, err := s.ImageStore() + if err != nil { + return nil, err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return nil, err + } + + creationDate := time.Now().UTC() + if options != nil && !options.CreationDate.IsZero() { + creationDate = options.CreationDate + } + + return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) +} + +func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options types.IDMappingOptions) (*Layer, error) { + layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool { + // If the driver supports shifting and the layer has no mappings, we can use it. + if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + return true + } + // If we want host mapping, and the layer uses mappings, it's not the best match. 
+ if options.HostUIDMapping && len(layer.UIDMap) != 0 { + return false + } + if options.HostGIDMapping && len(layer.GIDMap) != 0 { + return false + } + // Compare the maps. + return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) + } + var layer, parentLayer *Layer + allStores := append([]ROLayerStore{rlstore}, lstores...) + // Locate the image's top layer and its parent, if it has one. + for _, s := range allStores { + store := s + if store != rlstore { + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + } + // Walk the top layer list. + for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if cLayer, err := store.Get(candidate); err == nil { + // We want the layer's parent, too, if it has one. + var cParentLayer *Layer + if cLayer.Parent != "" { + // Its parent should be in one of the stores, somewhere. + for _, ps := range allStores { + if cParentLayer, err = ps.Get(cLayer.Parent); err == nil { + break + } + } + if cParentLayer == nil { + continue + } + } + // If the layer matches the desired mappings, it's a perfect match, + // so we're actually done here. + if layerMatchesMappingOptions(cLayer, options) { + return cLayer, nil + } + // Record the first one that we found, even if it's not ideal, so that + // we have a starting point. + if layer == nil { + layer = cLayer + parentLayer = cParentLayer + } + } + } + } + if layer == nil { + return nil, ErrLayerUnknown + } + // The top layer's mappings don't match the ones we want, but it's in a read-only + // image store, so we can't create and add a mapped copy of the layer to the image. + // We'll have to do the mapping for the container itself, elsewhere. + if !createMappedLayer { + return layer, nil + } + // The top layer's mappings don't match the ones we want, and it's in an image store + // that lets us edit image metadata... + if istore, ok := ristore.(*imageStore); ok { + // ... so create a duplicate of the layer with the desired mappings, and + // register it as an alternate top layer in the image. 
+		var layerOptions LayerOptions
+		if s.canUseShifting(options.UIDMap, options.GIDMap) {
+			layerOptions = LayerOptions{
+				IDMappingOptions: types.IDMappingOptions{
+					HostUIDMapping: true,
+					HostGIDMapping: true,
+					UIDMap:         nil,
+					GIDMap:         nil,
+				},
+			}
+		} else {
+			layerOptions = LayerOptions{
+				IDMappingOptions: types.IDMappingOptions{
+					HostUIDMapping: options.HostUIDMapping,
+					HostGIDMapping: options.HostGIDMapping,
+					UIDMap:         copyIDMap(options.UIDMap),
+					GIDMap:         copyIDMap(options.GIDMap),
+				},
+			}
+		}
+		layerOptions.TemplateLayer = layer.ID
+		mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID)
+		}
+		if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
+			if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
+				err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
+			}
+			return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
+		}
+		layer = mappedLayer
+	}
+	return layer, nil
+}
+
+func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
+	if options == nil {
+		options = &ContainerOptions{}
+	}
+	if options.HostUIDMapping {
+		options.UIDMap = nil
+	}
+	if options.HostGIDMapping {
+		options.GIDMap = nil
+	}
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	if id == "" {
+		id = stringid.GenerateRandomID()
+	}
+
+	var imageTopLayer *Layer
+	imageID := ""
+
+	if options.AutoUserNs || options.UIDMap != nil || options.GIDMap != nil {
+		// Prevent multiple instances from retrieving the same range when
+		// AutoUserNs is used.
+		// It doesn't prevent containers that specify an explicit mapping from
+		// overlapping with AutoUserNs.
+		s.usernsLock.Lock()
+		defer s.usernsLock.Unlock()
+	}
+
+	var imageHomeStore ROImageStore
+	var istore ImageStore
+	var istores []ROImageStore
+	var lstores []ROLayerStore
+	var cimage *Image
+	if image != "" {
+		var err error
+		lstores, err = s.ROLayerStores()
+		if err != nil {
+			return nil, err
+		}
+		istore, err = s.ImageStore()
+		if err != nil {
+			return nil, err
+		}
+		istores, err = s.ROImageStores()
+		if err != nil {
+			return nil, err
+		}
+		rlstore.Lock()
+		defer rlstore.Unlock()
+		if err := rlstore.ReloadIfChanged(); err != nil {
+			return nil, err
+		}
+		for _, s := range append([]ROImageStore{istore}, istores...) {
+			store := s
+			if store == istore {
+				store.Lock()
+			} else {
+				store.RLock()
+			}
+			defer store.Unlock()
+			if err := store.ReloadIfChanged(); err != nil {
+				return nil, err
+			}
+			cimage, err = store.Get(image)
+			if err == nil {
+				imageHomeStore = store
+				break
+			}
+		}
+		if cimage == nil {
+			return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", image)
+		}
+		imageID = cimage.ID
+	}
+
+	if options.AutoUserNs {
+		var err error
+		options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	uidMap := options.UIDMap
+	gidMap := options.GIDMap
+
+	idMappingsOptions := options.IDMappingOptions
+	if image != "" {
+		if cimage.TopLayer != "" {
+			createMappedLayer := imageHomeStore == istore
+			ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
+			if err != nil {
+				return nil, err
+			}
+			imageTopLayer = ilayer
+
+			if !options.HostUIDMapping && len(options.UIDMap) == 0 {
+				uidMap = ilayer.UIDMap
+			}
+			if !options.HostGIDMapping && len(options.GIDMap) == 0 {
+				gidMap = ilayer.GIDMap
+			}
+		}
+	} else {
+		rlstore.Lock()
+		defer rlstore.Unlock()
+		if err := rlstore.ReloadIfChanged(); err != nil {
+			return nil, err
+		}
+		if !options.HostUIDMapping && len(options.UIDMap) == 0 {
+			uidMap = s.uidMap
+		}
+		if !options.HostGIDMapping && len(options.GIDMap) == 0 {
+			gidMap = s.gidMap
+		}
+	}
+	var layerOptions *LayerOptions
+	if s.canUseShifting(uidMap, gidMap) {
+		layerOptions = &LayerOptions{
+			IDMappingOptions: types.IDMappingOptions{
+				HostUIDMapping: true,
+				HostGIDMapping: true,
+				UIDMap:         nil,
+				GIDMap:         nil,
+			},
+		}
+	} else {
+		layerOptions = &LayerOptions{
+			IDMappingOptions: types.IDMappingOptions{
+				HostUIDMapping: idMappingsOptions.HostUIDMapping,
+				HostGIDMapping: idMappingsOptions.HostGIDMapping,
+				UIDMap:         copyIDMap(uidMap),
+				GIDMap:         copyIDMap(gidMap),
+			},
+		}
+	}
+	if options.Flags == nil {
+		options.Flags = make(map[string]interface{})
+	}
+	plabel, _ := options.Flags["ProcessLabel"].(string)
+	mlabel, _ := options.Flags["MountLabel"].(string)
+	if (plabel == "" && mlabel != "") ||
+		(plabel != "" && mlabel == "") {
+		return nil, errors.Errorf("ProcessLabel and MountLabel must either both be specified or both be unspecified")
+	}
+
+	if plabel == "" {
+		processLabel, mountLabel, err := label.InitLabels(options.LabelOpts)
+		if err != nil {
+			return nil, err
+		}
+		options.Flags["ProcessLabel"] = processLabel
+		options.Flags["MountLabel"] = mountLabel
+	}
+
+	clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true)
+	if err != nil {
+		return nil, err
+	}
+	layer = clayer.ID
+	rcstore, err := s.ContainerStore()
+	if err != nil {
+		return nil, err
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if err := rcstore.ReloadIfChanged(); err != nil {
+		return nil, err
+	}
+	options.IDMappingOptions = types.IDMappingOptions{
+		HostUIDMapping: len(options.UIDMap) == 0,
+		HostGIDMapping: len(options.GIDMap) == 0,
+		UIDMap:         copyIDMap(options.UIDMap),
+		GIDMap:         copyIDMap(options.GIDMap),
+	}
+	container, err := rcstore.Create(id, names, imageID, layer, metadata, options)
+	if err != nil || container == nil {
+		rlstore.Delete(layer)
+	}
+	return container, err
+}
+
+func (s *store) SetMetadata(id, metadata string) error {
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return err
+	}
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return err
+	}
+	rcstore, err :=
s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rlstore.Exists(id) { + return rlstore.SetMetadata(id, metadata) + } + if ristore.Exists(id) { + return ristore.SetMetadata(id, metadata) + } + if rcstore.Exists(id) { + return rcstore.SetMetadata(id, metadata) + } + return ErrNotAnID +} + +func (s *store) Metadata(id string) (string, error) { + lstore, err := s.LayerStore() + if err != nil { + return "", err + } + lstores, err := s.ROLayerStores() + if err != nil { + return "", err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if store.Exists(id) { + return store.Metadata(id) + } + } + + istore, err := s.ImageStore() + if err != nil { + return "", err + } + istores, err := s.ROImageStores() + if err != nil { + return "", err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if store.Exists(id) { + return store.Metadata(id) + } + } + + cstore, err := s.ContainerStore() + if err != nil { + return "", err + } + cstore.RLock() + defer cstore.Unlock() + if err := cstore.ReloadIfChanged(); err != nil { + return "", err + } + if cstore.Exists(id) { + return cstore.Metadata(id) + } + return "", ErrNotAnID +} + +func (s *store) ListImageBigData(id string) ([]string, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + bigDataNames, err := store.BigDataNames(id) + if err == nil { + return bigDataNames, err + } + } + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (s *store) ImageBigDataSize(id, key string) (int64, error) { + istore, err := s.ImageStore() + if err != nil { + return -1, err + } + istores, err := s.ROImageStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + size, err := store.BigDataSize(id, key) + if err == nil { + return size, nil + } + } + return -1, ErrSizeUnknown +} + +func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { + ristore, err := s.ImageStore() + if err != nil { + return "", err + } + stores, err := s.ROImageStores() + if err != nil { + return "", err + } + stores = append([]ROImageStore{ristore}, stores...) 
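+	// Lookup order: the read-write image store is consulted first, then each
+	// read-only additional store; the first valid digest wins.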
+	for _, r := range stores {
+		ristore := r
+		ristore.RLock()
+		defer ristore.Unlock()
+		if err := ristore.ReloadIfChanged(); err != nil {
+			return "", err
+		}
+		d, err := ristore.BigDataDigest(id, key)
+		if err == nil && d.Validate() == nil {
+			return d, nil
+		}
+	}
+	return "", ErrDigestUnknown
+}
+
+func (s *store) ImageBigData(id, key string) ([]byte, error) {
+	istore, err := s.ImageStore()
+	if err != nil {
+		return nil, err
+	}
+	istores, err := s.ROImageStores()
+	if err != nil {
+		return nil, err
+	}
+	foundImage := false
+	for _, s := range append([]ROImageStore{istore}, istores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if err := store.ReloadIfChanged(); err != nil {
+			return nil, err
+		}
+		data, err := store.BigData(id, key)
+		if err == nil {
+			return data, nil
+		}
+		if store.Exists(id) {
+			foundImage = true
+		}
+	}
+	if foundImage {
+		return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id)
+	}
+	return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+}
+
+// ListLayerBigData retrieves a list of the (possibly large) chunks of
+// named data associated with a layer.
+func (s *store) ListLayerBigData(id string) ([]string, error) {
+	lstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	lstores, err := s.ROLayerStores()
+	if err != nil {
+		return nil, err
+	}
+	foundLayer := false
+	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if err := store.ReloadIfChanged(); err != nil {
+			return nil, err
+		}
+		data, err := store.BigDataNames(id)
+		if err == nil {
+			return data, nil
+		}
+		if store.Exists(id) {
+			foundLayer = true
+		}
+	}
+	if foundLayer {
+		return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id)
+	}
+	return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+}
+
+// LayerBigData retrieves a (possibly large) chunk of named data
+// associated with a layer.
+func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
+	lstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	lstores, err := s.ROLayerStores()
+	if err != nil {
+		return nil, err
+	}
+	foundLayer := false
+	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if err := store.ReloadIfChanged(); err != nil {
+			return nil, err
+		}
+		data, err := store.BigData(id, key)
+		if err == nil {
+			return data, nil
+		}
+		if store.Exists(id) {
+			foundLayer = true
+		}
+	}
+	if foundLayer {
+		return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id)
+	}
+	return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+}
+
+// SetLayerBigData stores a (possibly large) chunk of named data
+// associated with a layer.
+func (s *store) SetLayerBigData(id, key string, data io.Reader) error { + store, err := s.LayerStore() + if err != nil { + return err + } + + store.Lock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return err + } + return store.SetBigData(id, key, data) +} + +func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { + ristore, err := s.ImageStore() + if err != nil { + return err + } + + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + + return ristore.SetBigData(id, key, data, digestManifest) +} + +func (s *store) ImageSize(id string) (int64, error) { + var image *Image + + lstore, err := s.LayerStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary layer store data") + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional layer stores") + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + } + + var imageStore ROBigDataStore + istore, err := s.ImageStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary image store data") + } + istores, err := s.ROImageStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional image stores") + } + + // Look for the image's record. + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + if image, err = store.Get(id); err == nil { + imageStore = store + break + } + } + if image == nil { + return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + + // Start with a list of the image's top layers, if it has any. + queue := make(map[string]struct{}) + for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if layerID != "" { + queue[layerID] = struct{}{} + } + } + visited := make(map[string]struct{}) + // Walk all of the layers. + var size int64 + for len(visited) < len(queue) { + for layerID := range queue { + // Visit each layer only once. + if _, ok := visited[layerID]; ok { + continue + } + visited[layerID] = struct{}{} + // Look for the layer and the store that knows about it. + var layerStore ROLayerStore + var layer *Layer + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(layerID); err == nil { + layerStore = store + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + } + // The UncompressedSize is only valid if there's a digest to go with it. + n := layer.UncompressedSize + if layer.UncompressedDigest == "" { + // Compute the size. + n, err = layerStore.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + } + } + // Count this layer. + size += n + // Make a note to visit the layer's parent if we haven't already. + if layer.Parent != "" { + queue[layer.Parent] = struct{}{} + } + } + } + + // Count big data items. 
+ names, err := imageStore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + } + for _, name := range names { + n, err := imageStore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + } + size += n + } + + return size, nil +} + +func (s *store) ContainerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + } + + // Get the location of the container directory and container run directory. + // Do it before we lock the container store because they do, too. + cdir, err := s.ContainerDirectory(id) + if err != nil { + return -1, err + } + rdir, err := s.ContainerRunDirectory(id) + if err != nil { + return -1, err + } + + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return -1, err + } + + // Read the container record. + container, err := rcstore.Get(id) + if err != nil { + return -1, err + } + + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + } + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) + } + + // Count big data items. + names, err := rcstore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) + } + for _, name := range names { + n, err := rcstore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + } + size += n + } + + // Count the size of our container directory and container run directory. 
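+	// directory.Size recursively walks the per-container userdata trees;
+	// cdir and rdir were resolved before the container store was locked,
+	// since those helpers take the same lock themselves.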
+ n, err := directory.Size(cdir) + if err != nil { + return -1, err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return -1, err + } + size += n + + return size, nil +} + +func (s *store) ListContainerBigData(id string) ([]string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + + return rcstore.BigDataNames(id) +} + +func (s *store) ContainerBigDataSize(id, key string) (int64, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return -1, err + } + return rcstore.BigDataSize(id, key) +} + +func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return "", err + } + return rcstore.BigDataDigest(id, key) +} + +func (s *store) ContainerBigData(id, key string) ([]byte, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + return rcstore.BigData(id, key) +} + +func (s *store) SetContainerBigData(id, key string, data []byte) error { + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + return rcstore.SetBigData(id, key, data) +} + +func (s *store) Exists(id string) bool { + lstore, err := s.LayerStore() + if err != nil { + return false + } + lstores, err := s.ROLayerStores() + if err != nil { + return false + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return false + } + if store.Exists(id) { + return true + } + } + + istore, err := s.ImageStore() + if err != nil { + return false + } + istores, err := s.ROImageStores() + if err != nil { + return false + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return false + } + if store.Exists(id) { + return true + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return false + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return false + } + if rcstore.Exists(id) { + return true + } + + return false +} + +func dedupeNames(names []string) []string { + seen := make(map[string]bool) + deduped := make([]string, 0, len(names)) + for _, name := range names { + if _, wasSeen := seen[name]; !wasSeen { + seen[name] = true + deduped = append(deduped, name) + } + } + return deduped +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
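+// Two concurrent SetNames callers can each read the current name list and
+// then overwrite the other's update, silently dropping names; AddNames and
+// RemoveNames instead apply a single delta under the store lock.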
+func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { + deduped := dedupeNames(names) + + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + if rlstore.Exists(id) { + switch op { + case setNames: + return rlstore.SetNames(id, deduped) + case removeNames: + return rlstore.RemoveNames(id, deduped) + case addNames: + return rlstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } + } + + ristore, err := s.ImageStore() + if err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + if ristore.Exists(id) { + switch op { + case setNames: + return ristore.SetNames(id, deduped) + case removeNames: + return ristore.RemoveNames(id, deduped) + case addNames: + return ristore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } + } + + // Check if id refers to an image in a read-only store + ristores, err := s.ROImageStores() + if err != nil { + return err + } + for _, s := range ristores { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return err + } + if i, err := store.Get(id); err == nil { + if len(deduped) > 1 { + // Do not want to create image name in R/W storage + deduped = deduped[1:] + } + _, err := ristore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) + if err == nil { + return ristore.Save() + } + return err + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + if rcstore.Exists(id) { + switch op { + case setNames: + return rcstore.SetNames(id, deduped) + case removeNames: + return rcstore.RemoveNames(id, deduped) + case addNames: + return rcstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } + } + return ErrLayerUnknown +} + +func (s *store) Names(id string) ([]string, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + if l, err := store.Get(id); l != nil && err == nil { + return l.Names, nil + } + } + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...)
{ + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + if i, err := store.Get(id); i != nil && err == nil { + return i.Names, nil + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + if c, err := rcstore.Get(id); c != nil && err == nil { + return c.Names, nil + } + return nil, ErrLayerUnknown +} + +func (s *store) Lookup(name string) (string, error) { + lstore, err := s.LayerStore() + if err != nil { + return "", err + } + lstores, err := s.ROLayerStores() + if err != nil { + return "", err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if l, err := store.Get(name); l != nil && err == nil { + return l.ID, nil + } + } + + istore, err := s.ImageStore() + if err != nil { + return "", err + } + istores, err := s.ROImageStores() + if err != nil { + return "", err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if i, err := store.Get(name); i != nil && err == nil { + return i.ID, nil + } + } + + cstore, err := s.ContainerStore() + if err != nil { + return "", err + } + cstore.RLock() + defer cstore.Unlock() + if err := cstore.ReloadIfChanged(); err != nil { + return "", err + } + if c, err := cstore.Get(name); c != nil && err == nil { + return c.ID, nil + } + + return "", ErrLayerUnknown +} + +func (s *store) DeleteLayer(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rlstore.Exists(id) { + // Canonicalize a layer name to its ID before the comparisons below. + if l, err := rlstore.Get(id); err == nil { + id = l.ID + } + layers, err := rlstore.Layers() + if err != nil { + return err + } + for _, layer := range layers { + if layer.Parent == id { + return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID) + } + } + images, err := ristore.Images() + if err != nil { + return err + } + + for _, image := range images { + if image.TopLayer == id { + return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID) + } + if stringutils.InSlice(image.MappedTopLayers, id) { + // No write access to the image store, fail before the layer is deleted + if _, ok := ristore.(*imageStore); !ok { + return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID) + } + } + } + containers, err := rcstore.Containers() + if err != nil { + return err + } + for _, container := range containers { + if container.LayerID == id { + return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID) + } + } + if err := rlstore.Delete(id); err != nil { + return errors.Wrapf(err, "delete layer %v", id) + } + + // The check here is used to avoid iterating the images if we don't need to.
+ // There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers. + if istore, ok := ristore.(*imageStore); ok { + for _, image := range images { + if stringutils.InSlice(image.MappedTopLayers, id) { + if err = istore.removeMappedTopLayer(image.ID, id); err != nil { + return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID) + } + } + } + } + return nil + } + return ErrNotALayer +} + +func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + ristore, err := s.ImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return nil, err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + layersToRemove := []string{} + if ristore.Exists(id) { + image, err := ristore.Get(id) + if err != nil { + return nil, err + } + id = image.ID + containers, err := rcstore.Containers() + if err != nil { + return nil, err + } + aContainerByImage := make(map[string]string) + for _, container := range containers { + aContainerByImage[container.ImageID] = container.ID + } + if container, ok := aContainerByImage[id]; ok { + return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) + } + images, err := ristore.Images() + if err != nil { + return nil, err + } + layers, err := rlstore.Layers() + if err != nil { + return nil, err + } + childrenByParent := make(map[string][]string) + for _, layer := range layers { + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) + } + otherImagesTopLayers := make(map[string]struct{}) + for _, img := range images { + if img.ID != id { + otherImagesTopLayers[img.TopLayer] = struct{}{} + for _, layerID := range img.MappedTopLayers { + otherImagesTopLayers[layerID] = struct{}{} + } + } + } + if commit { + if err = ristore.Delete(id); err != nil { + return nil, err + } + } + layer := image.TopLayer + layersToRemoveMap := make(map[string]struct{}) + for layer != "" { + if rcstore.Exists(layer) { + break + } + if _, used := otherImagesTopLayers[layer]; used { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + hasChildrenNotBeingRemoved := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) + } + for _, layer := range layersToCheck { + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue + } + return true + } + } + } + return false + } + if hasChildrenNotBeingRemoved() { + break + } + if layer == image.TopLayer { + layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
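+			// MappedTopLayers are ID-mapped variants of the image's top layer;
+			// they are slated for removal together with the top layer itself.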
+ for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } + } + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} + layer = parent + } + } else { + return nil, ErrNotAnImage + } + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return nil, err + } + } + } + return layersToRemove, nil +} + +func (s *store) DeleteContainer(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rcstore.Exists(id) { + if container, err := rcstore.Get(id); err == nil { + errChan := make(chan error) + var wg sync.WaitGroup + + if rlstore.Exists(container.LayerID) { + wg.Add(1) + go func() { + errChan <- rlstore.Delete(container.LayerID) + wg.Done() + }() + } + wg.Add(1) + go func() { + errChan <- rcstore.Delete(id) + wg.Done() + }() + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) + }() + + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) + }() + + go func() { + wg.Wait() + close(errChan) + }() + + var errors []error + for { + select { + case err, ok := <-errChan: + if !ok { + return multierror.Append(nil, errors...).ErrorOrNil() + } + if err != nil { + errors = append(errors, err) + } + } + } + } + } + return ErrNotAContainer +} + +func (s *store) Delete(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rcstore.Exists(id) { + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } 
+ return nil + } + return ErrNotALayer + } + } + if ristore.Exists(id) { + return ristore.Delete(id) + } + if rlstore.Exists(id) { + return rlstore.Delete(id) + } + return ErrLayerUnknown +} + +func (s *store) Wipe() error { + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rlstore, err := s.LayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if err = rcstore.Wipe(); err != nil { + return err + } + if err = ristore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() +} + +func (s *store) Status() ([][2]string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + return rlstore.Status() +} + +func (s *store) Version() ([][2]string, error) { + return [][2]string{}, nil +} + +func (s *store) mount(id string, options drivers.MountOpts) (string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return "", err + } + + s.graphLock.Lock() + defer s.graphLock.Unlock() + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return "", err + } + + modified, err := s.graphLock.Modified() + if err != nil { + return "", err + } + + /* We need to make sure the home mount is present when the Mount is done. */ + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return "", err + } + s.lastLoaded = time.Now() + } + + if options.UidMaps != nil || options.GidMaps != nil { + options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) + } + + if rlstore.Exists(id) { + return rlstore.Mount(id, options) + } + return "", ErrLayerUnknown +} + +func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) { + // Append ReadOnly option to mountOptions + img, err := s.Image(id) + if err != nil { + return "", err + } + + if err := validateMountOptions(mountOpts); err != nil { + return "", err + } + options := drivers.MountOpts{ + MountLabel: mountLabel, + Options: append(mountOpts, "ro"), + } + + return s.mount(img.TopLayer, options) +} + +func (s *store) Mount(id, mountLabel string) (string, error) { + options := drivers.MountOpts{ + MountLabel: mountLabel, + } + // check if `id` is a container; if so, grab its LayerID, uidmap and gidmap, + // along with its mount options and volatile flag; otherwise we assume the + // id is a LayerID and attempt to mount it.
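+	// Illustrative caller sketch (hypothetical IDs; Unmount below is the
+	// paired cleanup call):
+	//
+	//	path, err := store.Mount(containerID, "")
+	//	if err == nil {
+	//		defer store.Unmount(containerID, false)
+	//		// ... work with files under path ...
+	//	}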
+ if container, err := s.Container(id); err == nil { + id = container.LayerID + options.UidMaps = container.UIDMap + options.GidMaps = container.GIDMap + options.Options = container.MountOpts() + if !s.disableVolatile { + if v, found := container.Flags["Volatile"]; found { + options.Volatile = v.(bool) + } + } + } + return s.mount(id, options) +} + +func (s *store) Mounted(id string) (int, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return 0, err + } + rlstore.RLock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return 0, err + } + + return rlstore.Mounted(id) +} + +func (s *store) UnmountImage(id string, force bool) (bool, error) { + img, err := s.Image(id) + if err != nil { + return false, err + } + return s.Unmount(img.TopLayer, force) +} + +func (s *store) Unmount(id string, force bool) (bool, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return false, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return false, err + } + if rlstore.Exists(id) { + return rlstore.Unmount(id, force) + } + return false, ErrLayerUnknown +} + +func (s *store) Changes(from, to string) ([]archive.Change, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + if store.Exists(to) { + return store.Changes(from, to) + } + } + return nil, ErrLayerUnknown +} + +func (s *store) DiffSize(from, to string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + if store.Exists(to) { + return store.DiffSize(from, to) + } + } + return -1, ErrLayerUnknown +} + +func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + s.graphLock.Lock() + defer s.graphLock.Unlock() + + modified, err := s.graphLock.Modified() + if err != nil { + return nil, err + } + + // We need to make sure the home mount is present when the Mount is done. + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return nil, err + } + s.lastLoaded = time.Now() + } + + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + store := s + store.RLock() + if err := store.ReloadIfChanged(); err != nil { + store.Unlock() + return nil, err + } + if store.Exists(to) { + rc, err := store.Diff(from, to, options) + if rc != nil && err == nil { + wrapped := ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + store.Unlock() + return err + }) + return wrapped, nil + } + store.Unlock() + return rc, err + } + store.Unlock() + } + return nil, ErrLayerUnknown +} + +func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + if !rlstore.Exists(to) { + return ErrLayerUnknown + } + return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) +} + +func (s *store) CleanupStagingDirectory(stagingDirectory string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + return rlstore.CleanupStagingDirectory(stagingDirectory) +} + +func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, err + } + } + if to != "" && !rlstore.Exists(to) { + return nil, ErrLayerUnknown + } + return rlstore.ApplyDiffWithDiffer(to, options, differ) +} + +func (s *store) DifferTarget(id string) (string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return "", err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return "", err + } + } + if rlstore.Exists(id) { + return rlstore.DifferTarget(id) + } + return "", ErrLayerUnknown +} + +func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { + rlstore, err := s.LayerStore() + if err != nil { + return -1, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return -1, err + } + if rlstore.Exists(to) { + return rlstore.ApplyDiff(to, diff) + } + return -1, ErrLayerUnknown +} + +func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { + var layers []Layer + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + storeLayers, err := m(store, d) + if err != nil { + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue + } + layers = append(layers, storeLayers...) 
+ } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } + return layers, nil +} + +func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + } + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) +} + +func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + } + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) +} + +func (s *store) LayerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + if store.Exists(id) { + return store.Size(id) + } + } + return -1, ErrLayerUnknown +} + +func (s *store) LayerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rlstore.RLock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, nil, err + } + if rlstore.Exists(id) { + return rlstore.ParentOwners(id) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, nil, err + } + rlstore.RLock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, nil, err + } + container, err := rcstore.Get(id) + if err != nil { + return nil, nil, err + } + if rlstore.Exists(container.LayerID) { + return rlstore.ParentOwners(container.LayerID) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) Layers() ([]Layer, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + layers, err := func() ([]Layer, error) { + lstore.Lock() + defer lstore.Unlock() + if err := lstore.Load(); err != nil { + return nil, err + } + return lstore.Layers() + }() + if err != nil { + return nil, err + } + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + + for _, s := range lstores { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + storeLayers, err := store.Layers() + if err != nil { + return nil, err + } + layers = append(layers, storeLayers...) + } + return layers, nil +} + +func (s *store) Images() ([]Image, error) { + var images []Image + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + storeImages, err := store.Images() + if err != nil { + return nil, err + } + images = append(images, storeImages...) + } + return images, nil +} + +func (s *store) Containers() ([]Container, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + + return rcstore.Containers() +} + +func (s *store) Layer(id string) (*Layer, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + layer, err := store.Get(id) + if err == nil { + return layer, nil + } + } + return nil, ErrLayerUnknown +} + +func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) { + adriver, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) + if !ok { + return nil, ErrLayerUnknown + } + + al, err := adriver.LookupAdditionalLayer(d, imageref) + if err != nil { + if errors.Is(err, drivers.ErrLayerUnknown) { + return nil, ErrLayerUnknown + } + return nil, err + } + info, err := al.Info() + if err != nil { + return nil, err + } + defer info.Close() + var layer Layer + if err := json.NewDecoder(info).Decode(&layer); err != nil { + return nil, err + } + return &additionalLayer{&layer, al, s}, nil +} + +type additionalLayer struct { + layer *Layer + handler drivers.AdditionalLayer + s *store +} + +func (al *additionalLayer) UncompressedDigest() digest.Digest { + return al.layer.UncompressedDigest +} + +func (al *additionalLayer) CompressedSize() int64 { + return al.layer.CompressedSize +} + +func (al *additionalLayer) PutAs(id, parent string, names []string) (*Layer, error) { + rlstore, err := al.s.LayerStore() + if err != nil { + return nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + rlstores, err := al.s.ROLayerStores() + if err != nil { + return nil, err + } + + var parentLayer *Layer + if parent != "" { + for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + if lstore != rlstore { + lstore.RLock() + defer lstore.Unlock() + if err := lstore.ReloadIfChanged(); err != nil { + return nil, err + } + } + parentLayer, err = lstore.Get(parent) + if err == nil { + break + } + } + if parentLayer == nil { + return nil, ErrLayerUnknown + } + } + + return rlstore.PutAdditionalLayer(id, parentLayer, names, al.handler) +} + +func (al *additionalLayer) Release() { + al.handler.Release() +} + +func (s *store) Image(id string) (*Image, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + image, err := store.Get(id) + if err == nil { + return image, nil + } + } + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { + images := []*Image{} + layer, err := s.Layer(id) + if err != nil { + return nil, err + } + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + imageList, err := store.Images() + if err != nil { + return nil, err + } + for _, image := range imageList { + if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { + images = append(images, &image) + } + } + } + return images, nil +} + +func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { + images := []*Image{} + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, store := range append([]ROImageStore{istore}, istores...) { + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + imageList, err := store.ByDigest(d) + if err != nil && errors.Cause(err) != ErrImageUnknown { + return nil, err + } + images = append(images, imageList...) + } + return images, nil +} + +func (s *store) Container(id string) (*Container, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + + return rcstore.Get(id) +} + +func (s *store) ContainerLayerID(id string) (string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return "", err + } + container, err := rcstore.Get(id) + if err != nil { + return "", err + } + return container.LayerID, nil +} + +func (s *store) ContainerByLayer(id string) (*Container, error) { + layer, err := s.Layer(id) + if err != nil { + return nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + containerList, err := rcstore.Containers() + if err != nil { + return nil, err + } + for _, container := range containerList { + if container.LayerID == layer.ID { + return &container, nil + } + } + + return nil, ErrContainerUnknown +} + +func (s *store) ContainerDirectory(id string) (string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return "", err + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return "", err + } + return gcpath, nil +} + +func (s *store) ContainerRunDirectory(id string) (string, error) { + rcstore, err := s.ContainerStore() + if err 
!= nil { + return "", err + } + + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return "", err + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return "", err + } + return rcpath, nil +} + +func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) Shutdown(force bool) ([]string, error) { + mounted := []string{} + modified := false + + rlstore, err := s.LayerStore() + if err != nil { + return mounted, err + } + + s.graphLock.Lock() + defer s.graphLock.Unlock() + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + + layers, err := rlstore.Layers() + if err != nil { + return mounted, err + } + for _, layer := range layers { + if layer.MountCount == 0 { + continue + } + mounted = append(mounted, layer.ID) + if force { + for layer.MountCount > 0 { + _, err2 := rlstore.Unmount(layer.ID, force) + if err2 != nil { + if err == nil { + err = err2 + } + break + } + modified = true + } + } + } + if len(mounted) > 0 && err == nil { + err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") + } + if err == nil { + err = s.graphDriver.Cleanup() + s.graphLock.Touch() + modified = true + } + if modified { + rlstore.Touch() + } + return mounted, err +} + +// Convert a BigData key name into an acceptable file name. +func makeBigDataBaseName(key string) string { + reader := strings.NewReader(key) + for reader.Len() > 0 { + ch, size, err := reader.ReadRune() + if err != nil || size != 1 { + break + } + if ch != '.' 
&& !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') { + break + } + } + if reader.Len() > 0 { + return "=" + base64.StdEncoding.EncodeToString([]byte(key)) + } + return key +} + +func stringSliceWithoutValue(slice []string, value string) []string { + modified := make([]string, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + +func copyStringSlice(slice []string) []string { + if len(slice) == 0 { + return nil + } + ret := make([]string, len(slice)) + copy(ret, slice) + return ret +} + +func copyStringInt64Map(m map[string]int64) map[string]int64 { + ret := make(map[string]int64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest { + ret := make(map[string]digest.Digest, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +func copyDigestSlice(slice []digest.Digest) []digest.Digest { + if len(slice) == 0 { + return nil + } + ret := make([]digest.Digest, len(slice)) + copy(ret, slice) + return ret +} + +// copyStringInterfaceMap still forces us to assume that the interface{} is +// a non-pointer scalar value +func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} { + ret := make(map[string]interface{}, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +// AutoUserNsMinSize is the minimum size for automatically created user namespaces +const AutoUserNsMinSize = 1024 + +// AutoUserNsMaxSize is the maximum size for automatically created user namespaces +const AutoUserNsMaxSize = 65536 + +// RootAutoUserNsUser is the default user used for root containers when automatically +// creating a user namespace. +const RootAutoUserNsUser = "containers" + +// SetDefaultConfigFilePath sets the default configuration to the specified path +func SetDefaultConfigFilePath(path string) { + types.SetDefaultConfigFilePath(path) +} + +// DefaultConfigFile returns the path to the storage config file used +func DefaultConfigFile(rootless bool) (string, error) { + return types.DefaultConfigFile(rootless) +} + +// ReloadConfigurationFile parses the specified configuration file and overrides +// the configuration in storeOptions. +func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) { + types.ReloadConfigurationFile(configFile, storeOptions) +} + +// GetDefaultMountOptions returns the default mountoptions defined in container/storage +func GetDefaultMountOptions() ([]string, error) { + defaultStoreOptions := types.Options() + return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions) +} + +// GetMountOptions returns the mountoptions for the specified driver and graphDriverOptions +func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) { + mountOpts := []string{ + ".mountopt", + fmt.Sprintf("%s.mountopt", driver), + } + for _, option := range graphDriverOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + for _, m := range mountOpts { + if m == key { + return strings.Split(val, ","), nil + } + } + } + return nil, nil +} + +// Free removes the store from the list of stores +func (s *store) Free() { + for i := 0; i < len(stores); i++ { + if stores[i] == s { + stores = append(stores[:i], stores[i+1:]...) 
+ return + } + } +} diff --git a/vendor/github.com/containers/storage/types/default_override_test.conf b/vendor/github.com/containers/storage/types/default_override_test.conf new file mode 100644 index 00000000000..caa537ba98b --- /dev/null +++ b/vendor/github.com/containers/storage/types/default_override_test.conf @@ -0,0 +1,11 @@ +[storage] + +# Default Storage Driver +driver = "" + +# Primary Read/Write location of container storage +graphroot = "environment_override_graphroot" + +# Storage path for rootless users +# +rootless_storage_path = "environment_override_rootless_storage_path" diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go new file mode 100644 index 00000000000..d920d12eb50 --- /dev/null +++ b/vendor/github.com/containers/storage/types/errors.go @@ -0,0 +1,58 @@ +package types + +import ( + "errors" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID. + ErrContainerUnknown = errors.New("container not known") + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = errors.New("could not compute digest of item") + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = errors.New("that ID is already in use") + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = errors.New("that name is already in use") + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = errors.New("image not known") + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = errors.New("image is in use by a container") + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = errors.New("not a valid name for a big data item") + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = errors.New("layer has children") + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. + ErrLayerNotMounted = errors.New("layer is not mounted") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = errors.New("layer not known") + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = errors.New("layer is in use by a container") + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. 
+ ErrLayerUsedByImage = errors.New("layer is in use by an image") + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = errors.New("error loading storage metadata") + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = errors.New("identifier is not a container") + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = errors.New("identifier is not a layer") + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = errors.New("identifier is not an image") + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. + ErrParentIsContainer = errors.New("would-be parent layer is a container") + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. + ErrParentUnknown = errors.New("parent of layer not known") + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. + ErrSizeUnknown = errors.New("size is not known") + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. + ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") + // ErrNotSupported is returned when the requested functionality is not supported. + ErrNotSupported = errors.New("not supported") +) diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go new file mode 100644 index 00000000000..82824ae2b22 --- /dev/null +++ b/vendor/github.com/containers/storage/types/idmappings.go @@ -0,0 +1,102 @@ +package types + +import ( + "fmt" + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" +) + +// AutoUserNsOptions defines how to automatically create a user namespace. +type AutoUserNsOptions struct { + // Size defines the size for the user namespace. If it is set to a + // value bigger than 0, the user namespace will have exactly this size. + // If it is not set, some heuristics will be used to find its size. + Size uint32 + // InitialSize defines the minimum size for the user namespace. + // The created user namespace will have at least this size. + InitialSize uint32 + // PasswdFile to use if the container uses a volume. + PasswdFile string + // GroupFile to use if the container uses a volume. + GroupFile string + // AdditionalUIDMappings specifies additional UID mappings to include in + // the generated user namespace. + AdditionalUIDMappings []idtools.IDMap + // AdditionalGIDMappings specifies additional GID mappings to include in + // the generated user namespace. + AdditionalGIDMappings []idtools.IDMap +} + +// IDMappingOptions are used for specifying how ID mapping should be set up for +// a layer or container. +type IDMappingOptions struct { + // UIDMap and GIDMap are used for setting up a layer's root filesystem + // for use inside of a user namespace where ID mapping is being used. + // If HostUIDMapping/HostGIDMapping is true, no mapping of the + // respective type will be used.
Otherwise, if UIDMap and/or GIDMap + // contain at least one mapping, one or both will be used. By default, + // if neither of those conditions apply, if the layer has a parent + // layer, the parent layer's mapping will be used, and if it does not + // have a parent layer, the mapping which was passed to the Store + // object when it was initialized will be used. + HostUIDMapping bool + HostGIDMapping bool + UIDMap []idtools.IDMap + GIDMap []idtools.IDMap + AutoUserNs bool + AutoUserNsOpts AutoUserNsOptions +} + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping +func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) { + options := IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + if subGIDMap == "" && subUIDMap != "" { + subGIDMap = subUIDMap + } + if subUIDMap == "" && subGIDMap != "" { + subUIDMap = subGIDMap + } + if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 { + GIDMapSlice = UIDMapSlice + } + if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 { + UIDMapSlice = GIDMapSlice + } + if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 { + UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())} + } + if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 { + GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())} + } + + if subUIDMap != "" && subGIDMap != "" { + mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap) + if err != nil { + return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap) + } + options.UIDMap = mappings.UIDs() + options.GIDMap = mappings.GIDs() + } + parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID") + if err != nil { + return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice) + } + parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID") + if err != nil { + return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", GIDMapSlice) + } + options.UIDMap = append(options.UIDMap, parsedUIDMap...) + options.GIDMap = append(options.GIDMap, parsedGIDMap...) + if len(options.UIDMap) > 0 { + options.HostUIDMapping = false + } + if len(options.GIDMap) > 0 { + options.HostGIDMapping = false + } + return &options, nil +} diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go new file mode 100644 index 00000000000..ad8377dab8a --- /dev/null +++ b/vendor/github.com/containers/storage/types/options.go @@ -0,0 +1,461 @@ +package types + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/BurntSushi/toml" + "github.com/containers/storage/drivers/overlay" + cfg "github.com/containers/storage/pkg/config" + "github.com/containers/storage/pkg/idtools" + "github.com/sirupsen/logrus" +) + +// TOML-friendly explicit tables used for conversions.
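+// A minimal storage.conf that decodes into this struct might look like the
+// following (illustrative values, mirroring the defaults declared below):
+//
+//	[storage]
+//	driver = "overlay"
+//	runroot = "/run/containers/storage"
+//	graphroot = "/var/lib/containers/storage"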
+type TomlConfig struct { + Storage struct { + Driver string `toml:"driver"` + RunRoot string `toml:"runroot"` + GraphRoot string `toml:"graphroot"` + RootlessStoragePath string `toml:"rootless_storage_path"` + Options cfg.OptionsConfig `toml:"options"` + } `toml:"storage"` +} + +const ( + // these are the default paths for the run root and graph root for rootful users; + // for rootless users the paths are constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) + +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" +) + +func init() { + defaultStoreOptions.RunRoot = defaultRunRoot + defaultStoreOptions.GraphRoot = defaultGraphRoot + defaultStoreOptions.GraphDriverName = "" + + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + // DefaultConfigFile(rootless) reports the storage.conf file in use by + // returning defaultConfigFile; if the override file exists, + // containers/storage uses it by default. + defaultConfigFile = defaultOverrideConfigFile + ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions) + } else { + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) + } + ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + } + // the reload can leave the run and graph roots empty if the config does not set them + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } +} + +// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. +// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.
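+// Tests can point storageConf at a fixture (such as the
+// default_override_test.conf file added in this change) instead of the
+// system-wide file resolved by DefaultConfigFile.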
+func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) { + var ( + defaultRootlessRunRoot string + defaultRootlessGraphRoot string + err error + ) + storageOpts := defaultStoreOptions + if rootless && rootlessUID != 0 { + storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) + if err != nil { + return storageOpts, err + } + } + _, err = os.Stat(storageConf) + if err != nil && !os.IsNotExist(err) { + return storageOpts, err + } + if err == nil && !defaultConfigFileSet { + defaultRootlessRunRoot = storageOpts.RunRoot + defaultRootlessGraphRoot = storageOpts.GraphRoot + storageOpts = StoreOptions{} + reloadConfigurationFileIfNeeded(storageConf, &storageOpts) + if rootless && rootlessUID != 0 { + // If the file did not specify a graphroot or runroot, + // set sane defaults so we don't try and use root-owned + // directories + if storageOpts.RunRoot == "" { + storageOpts.RunRoot = defaultRootlessRunRoot + } + if storageOpts.GraphRoot == "" { + if storageOpts.RootlessStoragePath != "" { + storageOpts.GraphRoot = storageOpts.RootlessStoragePath + } else { + storageOpts.GraphRoot = defaultRootlessGraphRoot + } + } + } + } + if storageOpts.RunRoot != "" { + runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID) + if err != nil { + return storageOpts, err + } + storageOpts.RunRoot = runRoot + } + if storageOpts.GraphRoot != "" { + graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID) + if err != nil { + return storageOpts, err + } + storageOpts.GraphRoot = graphRoot + } + if storageOpts.RootlessStoragePath != "" { + storagePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID) + if err != nil { + return storageOpts, err + } + storageOpts.RootlessStoragePath = storagePath + } + + return storageOpts, nil +} + +// DefaultStoreOptions returns the default storage ops for containers +func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { + storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0) + if err != nil { + return defaultStoreOptions, err + } + return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf) +} + +// StoreOptions is used for passing initialization options to GetStore(), for +// initializing a Store object and the underlying storage that it controls. +type StoreOptions struct { + // RunRoot is the filesystem path under which we can store run-time + // information, such as the locations of active mount points, that we + // want to lose if the host is rebooted. + RunRoot string `json:"runroot,omitempty"` + // GraphRoot is the filesystem path under which we will store the + // contents of layers, images, and containers. + GraphRoot string `json:"root,omitempty"` + // RootlessStoragePath is the storage path for rootless users + // default $HOME/.local/share/containers/storage + RootlessStoragePath string `toml:"rootless_storage_path"` + // GraphDriverName is the underlying storage driver that we'll be + // using. It only needs to be specified the first time a Store is + // initialized for a given RunRoot and GraphRoot. + GraphDriverName string `json:"driver,omitempty"` + // GraphDriverOptions are driver-specific options. + GraphDriverOptions []string `json:"driver-options,omitempty"` + // UIDMap and GIDMap are used for setting up a container's root filesystem + // for use inside of a user namespace where UID mapping is being used. 
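+	// For example, a single idtools.IDMap entry {ContainerID: 0, HostID:
+	// 100000, Size: 65536} maps container root onto host UID 100000 and
+	// the following 65535 IDs onto the host IDs above it.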
+ UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + // RootAutoNsUser is the user used to pick a subrange when automatically setting + // a user namespace for the root user. + RootAutoNsUser string `json:"root_auto_ns_user,omitempty"` + // AutoNsMinSize is the minimum size for an automatic user namespace. + AutoNsMinSize uint32 `json:"auto_userns_min_size,omitempty"` + // AutoNsMaxSize is the maximum size for an automatic user namespace. + AutoNsMaxSize uint32 `json:"auto_userns_max_size,omitempty"` + // PullOptions specifies options to be handed to pull managers + // This API is experimental and can be changed without bumping the major version number. + PullOptions map[string]string `toml:"pull_options"` + // DisableVolatile doesn't allow volatile mounts when it is set. + DisableVolatile bool `json:"disable-volatile,omitempty"` +} + +// isRootlessDriver returns true if the given storage driver is valid for containers running as non root +func isRootlessDriver(driver string) bool { + validDrivers := map[string]bool{ + "btrfs": true, + "overlay": true, + "overlay2": true, + "vfs": true, + } + return validDrivers[driver] +} + +// getRootlessStorageOpts returns the storage opts for containers running as non root +func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { + var opts StoreOptions + + dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) + if err != nil { + return opts, err + } + opts.RunRoot = rootlessRuntime + if systemOpts.RootlessStoragePath != "" { + opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) + if err != nil { + return opts, err + } + } else { + opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") + } + + if driver := systemOpts.GraphDriverName; isRootlessDriver(driver) { + opts.GraphDriverName = driver + } + if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { + opts.GraphDriverName = driver + } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + opts.GraphDriverName = overlayDriver + } + + if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver { + supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime) + if err != nil { + return opts, err + } + if supported { + opts.GraphDriverName = overlayDriver + } else { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.GraphDriverName = overlayDriver + opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} + } + } + if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break + } + } + } + } + if opts.GraphDriverName == "" { + opts.GraphDriverName = "vfs" + } + + if os.Getenv("STORAGE_OPTS") != "" { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) 
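+		// STORAGE_OPTS is a comma-separated list of driver options, e.g.
+		// STORAGE_OPTS="overlay.mountopt=nodev" (illustrative value).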
+	}
+
+	return opts, nil
+}
+
+// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
+func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
+	uid := getRootlessUID()
+	return DefaultStoreOptions(uid != 0, uid)
+}
+
+var prevReloadConfig = struct {
+	storeOptions *StoreOptions
+	mod          time.Time
+	mutex        sync.Mutex
+	configFile   string
+}{}
+
+// SetDefaultConfigFilePath sets the default configuration to the specified path
+func SetDefaultConfigFilePath(path string) {
+	defaultConfigFile = path
+	defaultConfigFileSet = true
+	ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+}
+
+func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+	prevReloadConfig.mutex.Lock()
+	defer prevReloadConfig.mutex.Unlock()
+
+	fi, err := os.Stat(configFile)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+		}
+		return
+	}
+
+	mtime := fi.ModTime()
+	if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
+		*storeOptions = *prevReloadConfig.storeOptions
+		return
+	}
+
+	ReloadConfigurationFile(configFile, storeOptions)
+
+	prevReloadConfig.storeOptions = storeOptions
+	prevReloadConfig.mod = mtime
+	prevReloadConfig.configFile = configFile
+}
+
+// ReloadConfigurationFile parses the specified configuration file and overrides
+// the configuration in storeOptions.
+func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
+	config := new(TomlConfig)
+
+	meta, err := toml.DecodeFile(configFile, &config)
+	if err == nil {
+		keys := meta.Undecoded()
+		if len(keys) > 0 {
+			logrus.Warningf("Failed to decode the keys %q from %q.", keys, configFile)
+		}
+	} else {
+		if !os.IsNotExist(err) {
+			fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+			return
+		}
+	}
+
+	// Clear storeOptions of previous settings
+	*storeOptions = StoreOptions{}
+	if config.Storage.Driver != "" {
+		storeOptions.GraphDriverName = config.Storage.Driver
+	}
+	if os.Getenv("STORAGE_DRIVER") != "" {
+		config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
+		storeOptions.GraphDriverName = config.Storage.Driver
+	}
+	if storeOptions.GraphDriverName == overlay2 {
+		logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.")
+		storeOptions.GraphDriverName = overlayDriver
+	}
+	if storeOptions.GraphDriverName == "" {
+		logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation.", configFile)
+	}
+	if config.Storage.RunRoot != "" {
+		storeOptions.RunRoot = config.Storage.RunRoot
+	}
+	if config.Storage.GraphRoot != "" {
+		storeOptions.GraphRoot = config.Storage.GraphRoot
+	}
+	if config.Storage.RootlessStoragePath != "" {
+		storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
+	}
+	for _, s := range config.Storage.Options.AdditionalImageStores {
+		storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
+	}
+	for _, s := range config.Storage.Options.AdditionalLayerStores {
+		storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.additionallayerstore=%s", config.Storage.Driver, s))
+	}
+	if config.Storage.Options.Size != "" {
+		storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
+	}
+	if config.Storage.Options.MountProgram != "" {
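+		// For example (illustrative path), mount_program = "/usr/bin/fuse-overlayfs"
+		// in storage.conf becomes overlay.mount_program=/usr/bin/fuse-overlayfs here.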
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) + } + if config.Storage.Options.SkipMountHome != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } + if config.Storage.Options.IgnoreChownErrors != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors)) + } + if config.Storage.Options.ForceMask != 0 { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask)) + } + if config.Storage.Options.MountOpt != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { + config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser + } + if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" { + config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { + mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) + if err != nil { + fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + return + } + storeOptions.UIDMap = mappings.UIDs() + storeOptions.GIDMap = mappings.GIDs() + } + + uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") + if err != nil { + fmt.Print(err) + } else { + storeOptions.UIDMap = uidmap + } + gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") + if err != nil { + fmt.Print(err) + } else { + storeOptions.GIDMap = gidmap + } + storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser + if config.Storage.Options.AutoUsernsMinSize > 0 { + storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize + } + if config.Storage.Options.AutoUsernsMaxSize > 0 { + storeOptions.AutoNsMaxSize = config.Storage.Options.AutoUsernsMaxSize + } + if config.Storage.Options.PullOptions != nil { + storeOptions.PullOptions = config.Storage.Options.PullOptions + } + + storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile + + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) 
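+	// Note that the STORAGE_OPTS override below replaces, rather than
+	// extends, the driver options collected above.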
+
+	if opts, ok := os.LookupEnv("STORAGE_OPTS"); ok {
+		storeOptions.GraphDriverOptions = strings.Split(opts, ",")
+	}
+	if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
+		storeOptions.GraphDriverOptions = nil
+	}
+}
+
+func Options() StoreOptions {
+	return defaultStoreOptions
+}
+
+// Save overwrites the tomlConfig in storage.conf with the given conf
+func Save(conf TomlConfig, rootless bool) error {
+	configFile, err := DefaultConfigFile(rootless)
+	if err != nil {
+		return err
+	}
+	if err = os.Remove(configFile); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	f, err := os.Create(configFile)
+	if err != nil {
+		return err
+	}
+
+	return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+	config := new(TomlConfig)
+
+	configFile, err := DefaultConfigFile(rootless)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = toml.DecodeFile(configFile, &config)
+	if err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
diff --git a/vendor/github.com/containers/storage/types/storage_broken.conf b/vendor/github.com/containers/storage/types/storage_broken.conf
new file mode 100644
index 00000000000..3bca1d97847
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/storage_broken.conf
@@ -0,0 +1,18 @@
+# This file is a TEST configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+foo = "bar"
+
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "/run/containers/test"
+
+[storage.options]
+# Primary Read/Write location of container storage
+graphroot = "/var/lib/containers/storage"
+
diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf
new file mode 100644
index 00000000000..9b682fe159c
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/storage_test.conf
@@ -0,0 +1,35 @@
+# This file is a TEST configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "$HOME/$UID/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "$HOME/$UID/containers/storage"
+
+# Storage path for rootless users
+#
+rootless_storage_path = "$HOME/$UID/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be a comma separated list.
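+# e.g. additionalimagestores = [ "/var/lib/shared-images" ] (illustrative path)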
+additionalimagestores = [ +] + +[storage.options.overlay] + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + + +[storage.options.thinpool] +# Storage Options for thinpool diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go new file mode 100644 index 00000000000..4dd1a786ede --- /dev/null +++ b/vendor/github.com/containers/storage/types/utils.go @@ -0,0 +1,213 @@ +package types + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUID int) (string, error) { + path, err := getRootlessRuntimeDir(rootlessUID) + if err != nil { + return "", err + } + path = filepath.Join(path, "containers") + if err := os.MkdirAll(path, 0700); err != nil { + return "", errors.Wrapf(err, "unable to make rootless runtime") + } + return path, nil +} + +type rootlessRuntimeDirEnvironment interface { + getProcCommandFile() string + getRunUserDir() string + getTmpPerUserDir() string + + homeDirGetRuntimeDir() (string, error) + systemLstat(string) (*system.StatT, error) + homedirGet() string +} + +type rootlessRuntimeDirEnvironmentImplementation struct { + procCommandFile string + runUserDir string + tmpPerUserDir string +} + +func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { + return env.procCommandFile +} +func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { + return env.runUserDir +} +func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { + return env.tmpPerUserDir +} +func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { + return homedir.GetRuntimeDir() +} +func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { + return system.Lstat(path) +} +func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { + return homedir.Get() +} + +func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { + st, err := env.systemLstat(dir) + return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 +} + +// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. +// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function. 
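+//
+// The lookup order implemented below: XDG_RUNTIME_DIR (via
+// homedir.GetRuntimeDir), then /run/user/$UID when PID 1 is systemd and the
+// directory is owned by the caller, then a per-user directory under the
+// system temp dir, and finally $HOME/rundir as a last resort.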
+func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) { + runtimeDir, err := env.homeDirGetRuntimeDir() + if err == nil { + return runtimeDir, nil + } + + initCommand, err := ioutil.ReadFile(env.getProcCommandFile()) + if err != nil || string(initCommand) == "systemd" { + runUserDir := env.getRunUserDir() + if isRootlessRuntimeDirOwner(runUserDir, env) { + return runUserDir, nil + } + } + + tmpPerUserDir := env.getTmpPerUserDir() + if tmpPerUserDir != "" { + if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { + if err := os.Mkdir(tmpPerUserDir, 0700); err != nil { + logrus.Errorf("Failed to create temp directory for user: %v", err) + } else { + return tmpPerUserDir, nil + } + } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) { + return tmpPerUserDir, nil + } + } + + homeDir := env.homedirGet() + if homeDir == "" { + return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty") + } + resolvedHomeDir, err := filepath.EvalSymlinks(homeDir) + if err != nil { + return "", err + } + return filepath.Join(resolvedHomeDir, "rundir"), nil +} + +func getRootlessRuntimeDir(rootlessUID int) (string, error) { + return getRootlessRuntimeDirIsolated( + rootlessRuntimeDirEnvironmentImplementation{ + "/proc/1/comm", + fmt.Sprintf("/run/user/%d", rootlessUID), + fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID), + }, + ) +} + +// getRootlessDirInfo returns the parent path of where the storage for containers and +// volumes will be in rootless mode +func getRootlessDirInfo(rootlessUID int) (string, string, error) { + rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID) + if err != nil { + return "", "", err + } + + dataDir, err := homedir.GetDataHome() + if err == nil { + return dataDir, rootlessRuntime, nil + } + + home := homedir.Get() + if home == "" { + return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty") + } + // runc doesn't like symlinks in the rootfs path, and at least + // on CoreOS /home is a symlink to /var/home, so resolve any symlink. 
+	resolvedHome, err := filepath.EvalSymlinks(home)
+	if err != nil {
+		return "", "", err
+	}
+	dataDir = filepath.Join(resolvedHome, ".local", "share")
+
+	return dataDir, rootlessRuntime, nil
+}
+
+func getRootlessUID() int {
+	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Geteuid()
+}
+
+func expandEnvPath(path string, rootlessUID int) (string, error) {
+	var err error
+	path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
+	path = os.ExpandEnv(path)
+	newpath, err := filepath.EvalSymlinks(path)
+	if err != nil {
+		newpath = filepath.Clean(path)
+	}
+	return newpath, nil
+}
+
+func DefaultConfigFile(rootless bool) (string, error) {
+	if defaultConfigFileSet {
+		return defaultConfigFile, nil
+	}
+
+	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
+		return path, nil
+	}
+	if !rootless {
+		return defaultConfigFile, nil
+	}
+
+	if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
+		return filepath.Join(configHome, "containers/storage.conf"), nil
+	}
+	home := homedir.Get()
+	if home == "" {
+		return "", errors.New("cannot determine user's homedir")
+	}
+	return filepath.Join(home, ".config/containers/storage.conf"), nil
+}
+
+func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+	prevReloadConfig.mutex.Lock()
+	defer prevReloadConfig.mutex.Unlock()
+
+	fi, err := os.Stat(configFile)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+		}
+		return
+	}
+
+	mtime := fi.ModTime()
+	if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
+		*storeOptions = *prevReloadConfig.storeOptions
+		return
+	}
+
+	ReloadConfigurationFile(configFile, storeOptions)
+
+	prevReloadConfig.storeOptions = storeOptions
+	prevReloadConfig.mod = mtime
+	prevReloadConfig.configFile = configFile
+}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
new file mode 100644
index 00000000000..523c92dc8b6
--- /dev/null
+++ b/vendor/github.com/containers/storage/userns.go
@@ -0,0 +1,302 @@
+package storage
+
+import (
+	"os"
+	"os/user"
+	"path/filepath"
+	"strconv"
+
+	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/containers/storage/types"
+	libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// getAdditionalSubIDs looks up the additional IDs configured for
+// the specified user.
+// The username argument is ignored for rootless users, as it is not
+// possible to use an arbitrary entry in /etc/sub*id.
+// Conversely, if no username is specified for root users, a
+// default name is used.
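+//
+// For illustration (name and numbers are example values), an /etc/subuid
+// entry such as "dave:100000:65536" grants that user the host UID range
+// [100000, 165536), which is what the lookup below returns as an idSet.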
+func getAdditionalSubIDs(username string) (*idSet, *idSet, error) {
+	var uids, gids *idSet
+
+	if unshare.IsRootless() {
+		username = os.Getenv("USER")
+		if username == "" {
+			var id string
+			if os.Geteuid() == 0 {
+				id = strconv.Itoa(unshare.GetRootlessUID())
+			} else {
+				id = strconv.Itoa(os.Geteuid())
+			}
+			userID, err := user.LookupId(id)
+			if err == nil {
+				username = userID.Username
+			}
+		}
+	} else if username == "" {
+		username = RootAutoUserNsUser
+	}
+	mappings, err := idtools.NewIDMappings(username, username)
+	if err != nil {
+		logrus.Errorf("Cannot find mappings for user %q: %v", username, err)
+	} else {
+		uids = getHostIDs(mappings.UIDs())
+		gids = getHostIDs(mappings.GIDs())
+	}
+	return uids, gids, nil
+}
+
+// getAvailableIDs returns the list of ranges that are usable by the current user.
+// When running as root, it looks up the additional IDs assigned to the specified user.
+// When running as rootless, the mappings assigned to the unprivileged user are converted
+// to the IDs inside of the initial rootless user namespace.
+func (s *store) getAvailableIDs() (*idSet, *idSet, error) {
+	if s.additionalUIDs == nil {
+		uids, gids, err := getAdditionalSubIDs(s.autoUsernsUser)
+		if err != nil {
+			return nil, nil, err
+		}
+		// Store the result so we don't need to look it up again next time
+		s.additionalUIDs, s.additionalGIDs = uids, gids
+	}
+
+	if !unshare.IsRootless() {
+		// No mapping to inner namespace needed
+		return s.additionalUIDs, s.additionalGIDs, nil
+	}
+
+	// We are already inside of the rootless user namespace.
+	// We need to remap the configured mappings to what is available
+	// inside of the rootless userns.
+	u := newIDSet([]interval{{start: 1, end: s.additionalUIDs.size() + 1}})
+	g := newIDSet([]interval{{start: 1, end: s.additionalGIDs.size() + 1}})
+	return u, g, nil
+}
+
+// parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and
+// /etc/group files.
+func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
+	if passwdFile == "" {
+		passwdFile = filepath.Join(containerMount, "etc/passwd")
+	}
+	if groupFile == "" {
+		groupFile = filepath.Join(containerMount, "etc/group")
+	}
+
+	size := 0
+
+	users, err := libcontainerUser.ParsePasswdFile(passwdFile)
+	if err == nil {
+		for _, u := range users {
+			// Skip the "nobody" user otherwise we end up with 65536
+			// ids with most images
+			if u.Name == "nobody" {
+				continue
+			}
+			if u.Uid > size {
+				size = u.Uid
+			}
+			if u.Gid > size {
+				size = u.Gid
+			}
+		}
+	}
+
+	groups, err := libcontainerUser.ParseGroupFile(groupFile)
+	if err == nil {
+		for _, g := range groups {
+			if g.Name == "nobody" {
+				continue
+			}
+			if g.Gid > size {
+				size = g.Gid
+			}
+		}
+	}
+
+	return uint32(size)
+}
+
+// getMaxSizeFromImage returns the maximum ID used by the specified image.
+// The layer stores must be already locked.
+func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) {
+	lstore, err := s.LayerStore()
+	if err != nil {
+		return 0, err
+	}
+	lstores, err := s.ROLayerStores()
+	if err != nil {
+		return 0, err
+	}
+
+	size := uint32(0)
+
+	var topLayer *Layer
+	layerName := image.TopLayer
+outer:
+	for {
+		for _, ls := range append([]ROLayerStore{lstore}, lstores...) {
+			layer, err := ls.Get(layerName)
+			if err != nil {
+				continue
+			}
+			if image.TopLayer == layerName {
+				topLayer = layer
+			}
+			for _, uid := range layer.UIDs {
+				if uid >= size {
+					size = uid + 1
+				}
+			}
+			for _, gid := range layer.GIDs {
+				if gid >= size {
+					size = gid + 1
+				}
+			}
+			layerName = layer.Parent
+			if layerName == "" {
+				break outer
+			}
+			continue outer
+		}
+		return 0, errors.Errorf("cannot find layer %q", layerName)
+	}
+
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return 0, err
+	}
+
+	layerOptions := &LayerOptions{
+		IDMappingOptions: types.IDMappingOptions{
+			HostUIDMapping: true,
+			HostGIDMapping: true,
+			UIDMap:         nil,
+			GIDMap:         nil,
+		},
+	}
+
+	// We need to create a temporary layer so we can mount it and lookup the
+	// maximum IDs used.
+	clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false)
+	if err != nil {
+		return 0, err
+	}
+	defer rlstore.Delete(clayer.ID)
+
+	mountOptions := drivers.MountOpts{
+		MountLabel: "",
+		UidMaps:    nil,
+		GidMaps:    nil,
+		Options:    nil,
+	}
+
+	mountpoint, err := rlstore.Mount(clayer.ID, mountOptions)
+	if err != nil {
+		return 0, err
+	}
+	defer rlstore.Unmount(clayer.ID, true)
+
+	userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile)
+	if userFilesSize > size {
+		size = userFilesSize
+	}
+
+	return size, nil
+}
+
+// getAutoUserNS creates an automatic user namespace
+func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
+	requestedSize := uint32(0)
+	initialSize := uint32(1)
+	if options.Size > 0 {
+		requestedSize = options.Size
+	}
+	if options.InitialSize > 0 {
+		initialSize = options.InitialSize
+	}
+
+	availableUIDs, availableGIDs, err := s.getAvailableIDs()
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "cannot read mappings")
+	}
+
+	// Look at every container that is using a user namespace and store
+	// the intervals that are already in use.
+	containers, err := s.Containers()
+	if err != nil {
+		return nil, nil, err
+	}
+	var usedUIDs, usedGIDs []idtools.IDMap
+	for _, c := range containers {
+		usedUIDs = append(usedUIDs, c.UIDMap...)
+		usedGIDs = append(usedGIDs, c.GIDMap...)
+	}
+
+	size := requestedSize
+
+	// If there is no requestedSize, lookup the maximum used IDs in the layers
+	// metadata. Make sure the size is at least s.autoNsMinSize and it is not
+	// bigger than s.autoNsMaxSize.
+	// This is a best effort heuristic.
+	if requestedSize == 0 {
+		size = initialSize
+		if s.autoNsMinSize > size {
+			size = s.autoNsMinSize
+		}
+		if image != nil {
+			sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile)
+			if err != nil {
+				return nil, nil, err
+			}
+			if sizeFromImage > size {
+				size = sizeFromImage
+			}
+		}
+		if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
+			return nil, nil, errors.Errorf("the container needs a user namespace with size %d that is bigger than the maximum value allowed with userns=auto %d", size, s.autoNsMaxSize)
+		}
+	}
+
+	return getAutoUserNSIDMappings(
+		int(size),
+		availableUIDs, availableGIDs,
+		usedUIDs, usedGIDs,
+		options.AdditionalUIDMappings, options.AdditionalGIDMappings,
+	)
+}
+
+// getAutoUserNSIDMappings computes the user/group id mappings for the automatic user namespace.
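+//
+// A small worked example (all numbers illustrative): with size=3, available
+// host IDs [100000, 100100), no used mappings, and one additional mapping
+// {ContainerID: 0, HostID: 5000, Size: 1}, container ID 0 keeps the explicit
+// mapping while container IDs 1 and 2 are zipped onto hosts 100000 and 100001.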
+func getAutoUserNSIDMappings( + size int, + availableUIDs, availableGIDs *idSet, + usedUIDMappings, usedGIDMappings, additionalUIDMappings, additionalGIDMappings []idtools.IDMap, +) ([]idtools.IDMap, []idtools.IDMap, error) { + usedUIDs := getHostIDs(append(usedUIDMappings, additionalUIDMappings...)) + usedGIDs := getHostIDs(append(usedGIDMappings, additionalGIDMappings...)) + + // Exclude additional uids and gids from requested range. + targetIDs := newIDSet([]interval{{start: 0, end: size}}) + requestedContainerUIDs := targetIDs.subtract(getContainerIDs(additionalUIDMappings)) + requestedContainerGIDs := targetIDs.subtract(getContainerIDs(additionalGIDMappings)) + + // Make sure the specified additional IDs are not used as part of the automatic + // mapping + availableUIDs, err := availableUIDs.subtract(usedUIDs).findAvailable(requestedContainerUIDs.size()) + if err != nil { + return nil, nil, err + } + availableGIDs, err = availableGIDs.subtract(usedGIDs).findAvailable(requestedContainerGIDs.size()) + if err != nil { + return nil, nil, err + } + + uidMap := append(availableUIDs.zip(requestedContainerUIDs), additionalUIDMappings...) + gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...) + return uidMap, gidMap, nil +} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go new file mode 100644 index 00000000000..cec377f26a9 --- /dev/null +++ b/vendor/github.com/containers/storage/utils.go @@ -0,0 +1,74 @@ +package storage + +import ( + "fmt" + + "github.com/containers/storage/types" +) + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping +func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*types.IDMappingOptions, error) { + return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap) +} + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUID int) (string, error) { + return types.GetRootlessRuntimeDir(rootlessUID) +} + +// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { + return types.DefaultStoreOptionsAutoDetectUID() +} + +// DefaultStoreOptions returns the default storage ops for containers +func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { + return types.DefaultStoreOptions(rootless, rootlessUID) +} + +func validateMountOptions(mountOptions []string) error { + var Empty struct{} + // Add invalid options for ImageMount() here. 
+	invalidOptions := map[string]struct{}{
+		"rw": Empty,
+	}
+
+	for _, opt := range mountOptions {
+		if _, ok := invalidOptions[opt]; ok {
+			return fmt.Errorf("%q option not supported", opt)
+		}
+	}
+	return nil
+}
+
+func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) {
+	result := make([]string, 0)
+	switch op {
+	case setNames:
+		// ignore all old names and just return new names
+		return dedupeNames(opParameters), nil
+	case removeNames:
+		// remove given names from old names
+		for _, name := range oldNames {
+			// only keep names in final result which do not intersect with input names
+			// basically `result = oldNames - opParameters`
+			nameShouldBeRemoved := false
+			for _, opName := range opParameters {
+				if name == opName {
+					nameShouldBeRemoved = true
+				}
+			}
+			if !nameShouldBeRemoved {
+				result = append(result, name)
+			}
+		}
+		return dedupeNames(result), nil
+	case addNames:
+		result = append(result, opParameters...)
+		result = append(result, oldNames...)
+		return dedupeNames(result), nil
+	default:
+		return result, errInvalidUpdateNameOperation
+	}
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
new file mode 100644
index 00000000000..cff5af1a64c
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -0,0 +1,261 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/godbus/dbus/v5"
+)
+
+const (
+	alpha        = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+	num          = `0123456789`
+	alphanum     = alpha + num
+	signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
+func needsEscape(i int, b byte) bool {
+	// Escape everything that is not a-z-A-Z-0-9
+	// Also escape 0-9 if it's the first character
+	return strings.IndexByte(alphanum, b) == -1 ||
+		(i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters.
+func PathBusEscape(path string) string {
+	// Special case the empty string
+	if len(path) == 0 {
+		return "_"
+	}
+	n := []byte{}
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if needsEscape(i, c) {
+			e := fmt.Sprintf("_%x", c)
+			n = append(n, []byte(e)...)
+		} else {
+			n = append(n, c)
+		}
+	}
+	return string(n)
+}
+
+// pathBusUnescape is the inverse of PathBusEscape.
+func pathBusUnescape(path string) string {
+	if path == "_" {
+		return ""
+	}
+	n := []byte{}
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if c == '_' && i+2 < len(path) {
+			res, err := hex.DecodeString(path[i+1 : i+3])
+			if err == nil {
+				n = append(n, res...)
+ } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// Deprecated: use NewWithContext instead. +func New() (*Conn, error) { + return NewWithContext(context.Background()) +} + +// NewWithContext establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func NewWithContext(ctx context.Context) (*Conn, error) { + conn, err := NewSystemConnectionContext(ctx) + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnectionContext(ctx) + } + return conn, err +} + +// Deprecated: use NewSystemConnectionContext instead. +func NewSystemConnection() (*Conn, error) { + return NewSystemConnectionContext(context.Background()) +} + +// NewSystemConnectionContext establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. +func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) + }) +} + +// Deprecated: use NewUserConnectionContext instead. +func NewUserConnection() (*Conn, error) { + return NewUserConnectionContext(context.Background()) +} + +// NewUserConnectionContext establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) + }) +} + +// Deprecated: use NewSystemdConnectionContext instead. +func NewSystemdConnection() (*Conn, error) { + return NewSystemdConnectionContext(context.Background()) +} + +// NewSystemdConnectionContext establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private", opts...) + }) + }) +} + +// Close closes an established connection. +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. 
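+//
+// A hedged sketch of a caller-supplied dialer (godbus is an assumed import
+// alias for github.com/godbus/dbus/v5; auth and Hello handling mirror the
+// helpers in this file):
+//
+//	conn, err := dbus.NewConnection(func() (*godbus.Conn, error) {
+//		c, err := godbus.SystemBusPrivate()
+//		if err != nil {
+//			return nil, err
+//		}
+//		if err := c.Auth(nil); err != nil {
+//			c.Close()
+//			return nil, err
+//		}
+//		if err := c.Hello(); err != nil {
+//			c.Close()
+//			return nil, err
+//		}
+//		return c, nil
+//	})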
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus(dbus.WithContext(ctx)) + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(ctx, createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go new file mode 100644 index 00000000000..fa04afc708e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -0,0 +1,830 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "context" + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus/v5" +) + +// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API +type Who string + +const ( + // All sends the signal to all processes in the unit + All Who = "all" + // Main sends the signal to the main process of the unit + Main Who = "main" + // Control sends the signal to the control process of the unit + Control Who = "control" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// Deprecated: use StartUnitContext instead. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StartUnitContext(context.Background(), name, mode, ch) +} + +// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. 
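+//
+// A minimal usage sketch (unit name and error handling are illustrative):
+//
+//	ch := make(chan string)
+//	if _, err := conn.StartUnitContext(ctx, "example.service", "replace", ch); err != nil {
+//		return err
+//	}
+//	if result := <-ch; result != "done" {
+//		return fmt.Errorf("start failed: %s", result)
+//	}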
+func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// Deprecated: use StopUnitContext instead.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.StopUnitContext(context.Background(), name, mode, ch)
+}
+
+// StopUnitContext is similar to StartUnitContext, but stops the specified unit
+// rather than starting it.
+func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// Deprecated: use ReloadUnitContext instead.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.ReloadUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadUnitContext reloads a unit. Reloading is done only if the unit
+// is already running, and fails otherwise.
+func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// Deprecated: use RestartUnitContext instead.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.RestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// RestartUnitContext restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// Deprecated: use TryRestartUnitContext instead.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.TryRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// TryRestartUnitContext is like RestartUnitContext, except that a service that
+// isn't running is not affected by the restart.
+func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// Deprecated: use ReloadOrRestartUnitContext instead.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadOrRestartUnitContext attempts a reload if the unit supports it and
+// uses a restart otherwise.
+func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// Deprecated: use ReloadOrTryRestartUnitContext instead.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it,
+// and uses a "Try" flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// Deprecated: use StartTransientUnitContext instead.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) +} + +// StartTransientUnitContext may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnitContext, properties contains properties +// of the unit. +func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// Deprecated: use KillUnitContext instead. +func (c *Conn) KillUnit(name string, signal int32) { + c.KillUnitContext(context.Background(), name, signal) +} + +// KillUnitContext takes the unit name and a UNIX signal number to send. +// All of the unit's processes are killed. +func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { + c.KillUnitWithTarget(ctx, name, All, signal) +} + +// KillUnitWithTarget is like KillUnitContext, but allows you to specify which +// process in the unit to send the signal to. +func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() +} + +// Deprecated: use ResetFailedUnitContext instead. +func (c *Conn) ResetFailedUnit(name string) error { + return c.ResetFailedUnitContext(context.Background(), name) +} + +// ResetFailedUnitContext resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// Deprecated: use SystemStateContext instead. +func (c *Conn) SystemState() (*Property, error) { + return c.SystemStateContext(context.Background()) +} + +// SystemStateContext returns the systemd state. Equivalent to +// systemctl is-system-running. +func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. +func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + if !path.IsValid() { + return nil, fmt.Errorf("invalid unit name: %v", path) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// Deprecated: use GetUnitPropertiesContext instead. 
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+	return c.GetUnitPropertiesContext(context.Background(), unit)
+}
+
+// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of
+// its dbus object properties.
+func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
+	path := unitPath(unit)
+	return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit")
+}
+
+// Deprecated: use GetUnitPathPropertiesContext instead.
+func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
+	return c.GetUnitPathPropertiesContext(context.Background(), path)
+}
+
+// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all
+// of its dbus object properties.
+func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) {
+	return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit")
+}
+
+// Deprecated: use GetAllPropertiesContext instead.
+func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) {
+	return c.GetAllPropertiesContext(context.Background(), unit)
+}
+
+// GetAllPropertiesContext takes the (unescaped) unit name and returns all of
+// its dbus object properties.
+func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
+	path := unitPath(unit)
+	return c.getProperties(ctx, path, "")
+}
+
+func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) {
+	var err error
+	var prop dbus.Variant
+
+	path := unitPath(unit)
+	if !path.IsValid() {
+		return nil, errors.New("invalid unit name: " + unit)
+	}
+
+	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+	err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Property{Name: propertyName, Value: prop}, nil
+}
+
+// Deprecated: use GetUnitPropertyContext instead.
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+	return c.GetUnitPropertyContext(context.Background(), unit, propertyName)
+}
+
+// GetUnitPropertyContext takes an (unescaped) unit name, and a property name,
+// and returns the property value.
+func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) {
+	return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// Deprecated: use GetServicePropertyContext instead.
+func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
+	return c.GetServicePropertyContext(context.Background(), service, propertyName)
+}
+
+// GetServicePropertyContext returns the property for the given service name and property name.
+func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) {
+	return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName)
+}
+
+// Deprecated: use GetUnitTypePropertiesContext instead.
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+	return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType)
+}
+
+// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. +// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. +func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) +} + +// Deprecated: use SetUnitPropertiesContext instead. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) +} + +// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +// Deprecated: use GetUnitTypePropertyContext instead. +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) +} + +// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, +// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. +func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) 
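+	// dbus.Store decodes each raw []interface{} row returned by systemd into
+	// the corresponding *UnitStatus; the struct field order must match the
+	// D-Bus (ssssssouso) structure layout.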
+	if err != nil {
+		return nil, err
+	}
+
+	return status, nil
+}
+
+// Deprecated: use ListUnitsContext instead.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+	return c.ListUnitsContext(context.Background())
+}
+
+// ListUnitsContext returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+// Also note that a unit is only loaded if it is active and/or enabled.
+// Units that are both disabled and inactive will thus not be returned.
+func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// Deprecated: use ListUnitsFilteredContext instead.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+	return c.ListUnitsFilteredContext(context.Background(), states)
+}
+
+// ListUnitsFilteredContext returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// Deprecated: use ListUnitsByPatternsContext instead.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+	return c.ListUnitsByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitsByPatternsContext returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// Deprecated: use ListUnitsByNamesContext instead.
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+	return c.ListUnitsByNamesContext(context.Background(), units)
+}
+
+// ListUnitsByNamesContext returns an array with units. It takes a list of units'
+// names and returns a UnitStatus array. Compared to the ListUnitsByPatternsContext
+// method, this method returns statuses even for inactive or non-existent
+// units. The input array should contain exact unit names, not patterns.
+//
+// Requires systemd v230 or higher.
+func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+	Path string
+	Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+	result := make([][]interface{}, 0)
+	err := f(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	files := make([]UnitFile, len(result))
+	fileInterface := make([]interface{}, len(files))
+	for i := range files {
+		fileInterface[i] = &files[i]
+	}
+
+	err = dbus.Store(resultInterface, fileInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return files, nil
+}
+
+// Deprecated: use ListUnitFilesContext instead.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+	return c.ListUnitFilesContext(context.Background())
+}
+
+// ListUnitFilesContext returns an array of all available unit files on disk.
+func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// Deprecated: use ListUnitFilesByPatternsContext instead.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+	return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitFilesByPatternsContext returns an array of all available unit files on disk matching the patterns.
+func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// Deprecated: use LinkUnitFilesContext instead.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+	return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// LinkUnitFilesContext links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans.
+//
+// The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+//
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]LinkUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+// Deprecated: use EnableUnitFilesContext instead.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+	return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// EnableUnitFilesContext may be used to enable one or more units in the system
+// (by creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+	var carries_install_info bool
+
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+	if err != nil {
+		return false, nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]EnableUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return false, nil, err
+	}
+
+	return carries_install_info, changes, nil
+}
+
+type EnableUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Deprecated: use DisableUnitFilesContext instead.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+	return c.DisableUnitFilesContext(context.Background(), files, runtime)
+}
+
+// DisableUnitFilesContext may be used to disable one or more units in the
+// system (by removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]DisableUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
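+	// As with EnableUnitFilesContext, each decoded change row carries the
+	// change type (symlink or unlink), the symlink file name, and its destination.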
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type DisableUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Deprecated: use MaskUnitFilesContext instead.
+func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+	return c.MaskUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// MaskUnitFilesContext masks one or more units in the system.
+//
+// The files argument contains a list of units to mask (either just file names
+// or full absolute paths if the unit files are residing outside the usual unit
+// search paths).
+//
+// The runtime argument is used to specify whether the unit was enabled for
+// runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
+func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]MaskUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type MaskUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Deprecated: use UnmaskUnitFilesContext instead.
+func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+	return c.UnmaskUnitFilesContext(context.Background(), files, runtime)
+}
+
+// UnmaskUnitFilesContext unmasks one or more units in the system.
+//
+// It takes the list of unit files to unmask (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit search
+// paths), and a boolean runtime flag to specify whether the unit was enabled
+// for runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
+func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]UnmaskUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Deprecated: use ReloadContext instead.
+func (c *Conn) Reload() error {
+	return c.ReloadContext(context.Background())
+}
+
+// ReloadContext instructs systemd to scan for and reload unit files. This is
+// equivalent to systemctl daemon-reload.
+func (c *Conn) ReloadContext(ctx context.Context) error {
+	return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+	return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
+
+// unitName returns the unescaped base element of the supplied escaped path.
+func unitName(dpath dbus.ObjectPath) string {
+	return pathBusUnescape(path.Base(string(dpath)))
+}
+
+// JobStatus holds a currently queued job definition.
+type JobStatus struct {
+	Id       uint32          // The numeric job id
+	Unit     string          // The primary unit name for this job
+	JobType  string          // The job type as string
+	Status   string          // The job state as string
+	JobPath  dbus.ObjectPath // The job object path
+	UnitPath dbus.ObjectPath // The unit object path
+}
+
+// Deprecated: use ListJobsContext instead.
+func (c *Conn) ListJobs() ([]JobStatus, error) {
+	return c.ListJobsContext(context.Background())
+}
+
+// ListJobsContext returns an array with all currently queued jobs.
+func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) {
+	return c.listJobsInternal(ctx)
+}
+
+func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
+	result := make([][]interface{}, 0)
+	if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	status := make([]JobStatus, len(result))
+	statusInterface := make([]interface{}, len(status))
+	for i := range status {
+		statusInterface[i] = &status[i]
+	}
+
+	if err := dbus.Store(resultInterface, statusInterface...); err != nil {
+		return nil, err
+	}
+
+	return status, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
new file mode 100644
index 00000000000..fb42b627338
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
@@ -0,0 +1,237 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"github.com/godbus/dbus/v5"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties with future release.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+	Name  string
+	Value dbus.Variant
+}
+
+type PropertyCollection struct {
+	Name       string
+	Properties []Property
+}
+
+type execStart struct {
+	Path             string   // the binary path to execute
+	Args             []string // an array with all arguments to pass to the executed command, starting with argument 0
+	UncleanIsFailure bool     // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+	execStarts := []execStart{
+		{
+			Path:             command[0],
+			Args:             command,
+			UncleanIsFailure: uncleanIsFailure,
+		},
+	}
+
+	return Property{
+		Name:  "ExecStart",
+		Value: dbus.MakeVariant(execStarts),
+	}
+}
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+	return Property{
+		Name:  "RemainAfterExit",
+		Value: dbus.MakeVariant(b),
+	}
+}
+
+// PropType sets the Type service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
+func PropType(t string) Property {
+	return Property{
+		Name:  "Type",
+		Value: dbus.MakeVariant(t),
+	}
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+	return Property{
+		Name:  "Description",
+		Value: dbus.MakeVariant(desc),
+	}
+}
+
+func propDependency(name string, units []string) Property {
+	return Property{
+		Name:  name,
+		Value: dbus.MakeVariant(units),
+	}
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+	return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+	return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+	return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+	return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+	return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+	return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+	return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+	return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+	return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+	return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+	return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+	return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+	return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+	return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+	return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+	return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+	return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property.
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go new file mode 100644 index 00000000000..17c5d485657 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go new file mode 100644 index 00000000000..7e370fea212 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -0,0 +1,333 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "errors" + "log" + "time" + + "github.com/godbus/dbus/v5" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// SubscribeUnits returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. 
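+// A filterUnit callback that returns true causes that unit to be skipped.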
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+	old := make(map[string]*UnitStatus)
+	statusChan := make(chan map[string]*UnitStatus, buffer)
+	errChan := make(chan error, buffer)
+
+	go func() {
+		for {
+			timerChan := time.After(interval)
+
+			units, err := c.ListUnits()
+			if err == nil {
+				cur := make(map[string]*UnitStatus)
+				for i := range units {
+					if filterUnit != nil && filterUnit(units[i].Name) {
+						continue
+					}
+					cur[units[i].Name] = &units[i]
+				}
+
+				// add all new or changed units
+				changed := make(map[string]*UnitStatus)
+				for n, u := range cur {
+					if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+						changed[n] = u
+					}
+					delete(old, n)
+				}
+
+				// add all deleted units
+				for oldN := range old {
+					changed[oldN] = nil
+				}
+
+				old = cur
+
+				if len(changed) != 0 {
+					statusChan <- changed
+				}
+			} else {
+				errChan <- err
+			}
+
+			<-timerChan
+		}
+	}()
+
+	return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+	UnitName string
+	SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+	if c == nil {
+		msg := "nil receiver"
+		select {
+		case errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+
+	c.subStateSubscriber.Lock()
+	defer c.subStateSubscriber.Unlock()
+	c.subStateSubscriber.updateCh = updateCh
+	c.subStateSubscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) {
+	c.subStateSubscriber.Lock()
+	defer c.subStateSubscriber.Unlock()
+
+	if c.subStateSubscriber.updateCh == nil {
+		return
+	}
+
+	isIgnored := c.shouldIgnore(unitPath)
+	defer c.cleanIgnore()
+	if isIgnored {
+		return
+	}
+
+	info, err := c.GetUnitPathProperties(unitPath)
+	if err != nil {
+		select {
+		case c.subStateSubscriber.errCh <- err:
+		default:
+			log.Printf("full error channel while reporting: %s\n", err)
+		}
+		return
+	}
+	defer c.updateIgnore(unitPath, info)
+
+	name, ok := info["Id"].(string)
+	if !ok {
+		msg := "failed to cast info.Id"
+		select {
+		case c.subStateSubscriber.errCh <- errors.New(msg):
+		default:
+			// Log msg here, not err: err is nil when only the cast fails.
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+	substate, ok := info["SubState"].(string)
+	if !ok {
+		msg := "failed to cast info.SubState"
+		select {
+		case c.subStateSubscriber.errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+
+	update := &SubStateUpdate{name, substate}
+	select {
+	case c.subStateSubscriber.updateCh <- update:
+	default:
+		msg := "update channel is full"
+		select {
+		case c.subStateSubscriber.errCh <- errors.New(msg):
+		default:
+			log.Printf("full error channel while reporting: %s\n", msg)
+		}
+		return
+	}
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+	t, ok := c.subStateSubscriber.ignore[path]
+	return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+	loadState, ok := info["LoadState"].(string)
+	if !ok {
+		return
+	}
+
+	// unit is unloaded - it will trigger bad systemd dbus behavior
+	if loadState == "not-found" {
+		c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+	}
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+	now := time.Now().UnixNano()
+	if c.subStateSubscriber.cleanIgnore < now {
+		c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval
+
+		for p, t := range c.subStateSubscriber.ignore {
+			if t < now {
+				delete(c.subStateSubscriber.ignore, p)
+			}
+		}
+	}
+}
+
+// PropertiesUpdate holds a map of a unit's changed properties
+type PropertiesUpdate struct {
+	UnitName string
+	Changed  map[string]dbus.Variant
+}
+
+// SetPropertiesSubscriber writes to updateCh when any unit's properties
+// change. Every property change reported by systemd will be sent; that is, no
+// transitions will be "missed" (as they might be with SetSubStateSubscriber).
+// However, state changes will only be written to the channel with non-blocking
+// writes. If updateCh is full, it attempts to write an error to errCh; if
+// errCh is full, the error passes silently.
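+// A nil updateCh effectively disables delivery of these updates.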
+func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go new file mode 100644 index 00000000000..5b408d5847a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. 
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 0668a66cf70..be2b3436062 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -15,7 +15,7 @@ type roffRenderer struct { extensions blackfriday.Extensions listCounters []int firstHeader bool - defineTerm bool + firstDD bool listDepth int } @@ -42,7 +42,8 @@ const ( quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" tableEnd = ".TE\n" tableCellStart = "T{\n" @@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - r.handleText(w, node, entering) + escapeSpecialChars(w, node.Literal) case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, codeCloseTag) case blackfriday.Table: r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) case blackfriday.TableHead: case blackfriday.TableBody: case blackfriday.TableRow: // no action as cell entries do all the nroff formatting return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags default: fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) } return walkAction } -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { if entering { switch node.Level { @@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering if node.ListFlags&blackfriday.ListTypeOrdered != 0 { out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { + // DT (definition term): line just before DD (see below). + out(w, dtTag) + r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true + // DD (definition description): line that starts with ": ". + // + // We have to distinguish between the first DD and the + // subsequent ones, as there should be no vertical + // whitespace between the DT and the first DD. 
+ if r.firstDD { + r.firstDD = false } else { - r.defineTerm = false + out(w, dd2Tag) } } else { out(w, ".IP \\(bu 2\n") @@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { if entering { out(w, tableStart) - //call walker to count cells (and rows?) so format section can be produced + // call walker to count cells (and rows?) so format section can be produced columns := countColumns(node) out(w, strings.Repeat("l ", columns)+"\n") out(w, strings.Repeat("l ", columns)+".\n") @@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering } func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } if entering { + var start string if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) + start = "\t" + } + if node.IsHeader { + start += codespanTag + } else if nodeLiteralSize(node) > 30 { + start += tableCellStart } + out(w, start) } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag + var end string + if node.IsHeader { + end = codespanCloseTag + } else if nodeLiteralSize(node) > 30 { + end = tableCellEnd + } + if node.Next == nil && end != tableCellEnd { + // Last cell: need to carriage return if we are at the end of the + // header row and content isn't wrapped in a "tablecell" + end += crTag } out(w, end) } } +func nodeLiteralSize(node *blackfriday.Node) int { + total := 0 + for n := node.FirstChild; n != nil; n = n.FirstChild { + total += len(n.Literal) + } + return total +} + // because roff format requires knowing the column count before outputting any table // data we need to walk a table tree and count the columns func countColumns(node *blackfriday.Node) int { @@ -309,15 +309,6 @@ func out(w io.Writer, output string) { io.WriteString(w, output) // nolint: errcheck } -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - func escapeSpecialChars(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period @@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) { // directly copy normal characters org := i - for i < len(text) && !needsBackslash(text[i]) { + for i < len(text) && text[i] != '\\' { i++ } if i > org { diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml index 3938f383494..b94ff8cf92a 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -4,10 +4,12 @@ language: go go: - - 1.7.x - - 1.8.x + - 1.13.x + - 1.16.x - tip - +arch: + - AMD64 + - ppc64le os: - linux - osx diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 49b2baa9f35..3624617c89b 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -7,6 +7,19 @@ standard library][go#20126]. 
The purpose of this function is to be a "secure" alternative to `filepath.Join`, and in particular it provides certain guarantees that are not provided by `filepath.Join`. +> **NOTE**: This code is *only* safe if you are not at risk of other processes +> modifying path components after you've used `SecureJoin`. If it is possible +> for a malicious process to modify path components of the resolved path, then +> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There +> are some Linux kernel patches I'm working on which might allow for a better +> solution.][lwn-obeneath] +> +> In addition, with a slightly modified API it might be possible to use +> `O_PATH` and verify that the opened path is actually the resolved one -- but +> I have not done that yet. I might add it in the future as a helper function +> to help users verify the path (we can't just return `/proc/self/fd/` +> because that doesn't always work transparently for all users). + This is the function prototype: ```go @@ -16,8 +29,8 @@ func SecureJoin(root, unsafePath string) (string, error) This library **guarantees** the following: * If no error is set, the resulting string **must** be a child path of - `SecureJoin` and will not contain any symlink path components (they will all - be expanded). + `root` and will not contain any symlink path components (they will all be + expanded). * When expanding symlinks, all symlink path components **must** be resolved relative to the provided root. In particular, this can be considered a @@ -25,7 +38,7 @@ This library **guarantees** the following: these symlinks will **not** be expanded lexically (`filepath.Clean` is not called on the input before processing). -* Non-existant path components are unaffected by `SecureJoin` (similar to +* Non-existent path components are unaffected by `SecureJoin` (similar to `filepath.EvalSymlinks`'s semantics). * The returned path will always be `filepath.Clean`ed and thus not contain any @@ -57,6 +70,7 @@ func SecureJoin(root, unsafePath string) (string, error) { } ``` +[lwn-obeneath]: https://lwn.net/Articles/767547/ [go#20126]: https://github.com/golang/go/issues/20126 ### License ### diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index ee1372d33a2..7179039691c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.2 +0.2.3 diff --git a/vendor/github.com/cyphar/filepath-securejoin/go.mod b/vendor/github.com/cyphar/filepath-securejoin/go.mod new file mode 100644 index 00000000000..0607c1fa060 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/go.mod @@ -0,0 +1,3 @@ +module github.com/cyphar/filepath-securejoin + +go 1.13 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index c4ca3d71300..7dd08dbbdf7 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -12,39 +12,20 @@ package securejoin import ( "bytes" + "errors" "os" "path/filepath" "strings" "syscall" - - "github.com/pkg/errors" ) -// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been -// evaluated in attempting to securely join the two given paths. 
-var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join") - // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is // effectively a more broad version of os.IsNotExist. func IsNotExist(err error) bool { - // If it's a bone-fide ENOENT just bail. - if os.IsNotExist(errors.Cause(err)) { - return true - } - // Check that it's not actually an ENOTDIR, which in some cases is a more // convoluted case of ENOENT (usually involving weird paths). - var errno error - switch err := errors.Cause(err).(type) { - case *os.PathError: - errno = err.Err - case *os.LinkError: - errno = err.Err - case *os.SyscallError: - errno = err.Err - } - return errno == syscall.ENOTDIR || errno == syscall.ENOENT + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) } // SecureJoinVFS joins the two given path components (similar to Join) except @@ -68,7 +49,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { n := 0 for unsafePath != "" { if n > 255 { - return "", ErrSymlinkLoop + return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} } // Next path component, p. diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf deleted file mode 100644 index 66bb574b955..00000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf +++ /dev/null @@ -1 +0,0 @@ -github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/disiqueira/gotree/v3/.gitignore b/vendor/github.com/disiqueira/gotree/v3/.gitignore new file mode 100644 index 00000000000..3236c30ab6a --- /dev/null +++ b/vendor/github.com/disiqueira/gotree/v3/.gitignore @@ -0,0 +1,137 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ +GoTree.iml +### Linux template +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* +### Windows template +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/workspace.xml +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) 
+com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) + +# Folders + +# Architecture specific extensions/prefixes + + + +### OSX template +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk diff --git a/vendor/github.com/disiqueira/gotree/v3/.travis.yml b/vendor/github.com/disiqueira/gotree/v3/.travis.yml new file mode 100644 index 00000000000..29261dfffe1 --- /dev/null +++ b/vendor/github.com/disiqueira/gotree/v3/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/disiqueira/gotree +git: + depth: 1 +env: + - GO111MODULE=on + - GO111MODULE=off +go: [ 1.11.x, 1.12.x, 1.13.x ] +os: [ linux, osx ] +script: + - go test -race -v ./... diff --git a/vendor/github.com/disiqueira/gotree/v3/LICENSE b/vendor/github.com/disiqueira/gotree/v3/LICENSE new file mode 100644 index 00000000000..e790b5a5231 --- /dev/null +++ b/vendor/github.com/disiqueira/gotree/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Diego Siqueira + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/disiqueira/gotree/v3/README.md b/vendor/github.com/disiqueira/gotree/v3/README.md new file mode 100644 index 00000000000..d09d4a98cda --- /dev/null +++ b/vendor/github.com/disiqueira/gotree/v3/README.md @@ -0,0 +1,104 @@ +# ![GoTree](https://rawgit.com/DiSiqueira/GoTree/master/gotree-logo.png) + +# GoTree ![Language Badge](https://img.shields.io/badge/Language-Go-blue.svg) ![Go Report](https://goreportcard.com/badge/github.com/DiSiqueira/GoTree) ![License Badge](https://img.shields.io/badge/License-MIT-blue.svg) ![Status Badge](https://img.shields.io/badge/Status-Beta-brightgreen.svg) [![GoDoc](https://godoc.org/github.com/DiSiqueira/GoTree?status.svg)](https://godoc.org/github.com/DiSiqueira/GoTree) [![Build Status](https://travis-ci.org/DiSiqueira/GoTree.svg?branch=master)](https://travis-ci.org/DiSiqueira/GoTree) + +Simple Go module to print tree structures in terminal. 
Heavily inspired by [The Tree Command for Linux][treecommand]
+
+GoTree's goal is to be a simple tool providing a stupidly easy-to-use and fast way to print recursive structures.
+
+[treecommand]: http://mama.indstate.edu/users/ice/tree/
+
+## Project Status
+
+GoTree is in beta. Pull Requests [are welcome](https://github.com/DiSiqueira/GoTree#social-coding)
+
+![](http://image.prntscr.com/image/2a0dbf0777454446b8083fb6a0dc51fe.png)
+
+## Features
+
+- Very simple and fast code
+- Intuitive names
+- Easy to extend
+- Uses only native libs
+- STUPIDLY [EASY TO USE](https://github.com/DiSiqueira/GoTree#usage)
+
+## Installation
+
+### Go Get
+
+```bash
+$ go get github.com/disiqueira/gotree
+```
+
+## Usage
+
+### Simple create, populate and print example
+
+![](http://image.prntscr.com/image/dd2fe3737e6543f7b21941a6953598c2.png)
+
+```golang
+package main
+
+import (
+	"fmt"
+
+	"github.com/disiqueira/gotree"
+)
+
+func main() {
+	artist := gotree.New("Pantera")
+	album := artist.Add("Far Beyond Driven")
+	album.Add("5 minutes Alone")
+
+	fmt.Println(artist.Print())
+}
+```
+
+## Contributing
+
+### Bug Reports & Feature Requests
+
+Please use the [issue tracker](https://github.com/DiSiqueira/GoTree/issues) to report any bugs or file feature requests.
+
+### Developing
+
+PRs are welcome. To begin developing, do this:
+
+```bash
+$ git clone --recursive git@github.com:DiSiqueira/GoTree.git
+$ cd GoTree/
+```
+
+## Social Coding
+
+1. Create an issue to discuss your idea
+2. [Fork it](https://github.com/DiSiqueira/GoTree/fork)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+5. Push to the branch (`git push origin my-new-feature`)
+6. Create a new Pull Request
+7. Profit! :white_check_mark:
+
+## License
+
+The MIT License (MIT)
+
+Copyright (c) 2013-2018 Diego Siqueira
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
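For quick reference, a hedged sketch of the GoTree API exercised by the README above. Only `New`, `Add` and `Print` from the README example are used; the labels ("sealer", the image names) are invented purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/disiqueira/gotree"
)

func main() {
	// Each Add returns the newly created child node, so deeper
	// nesting is just calling Add on the returned value.
	root := gotree.New("sealer")
	images := root.Add("images")
	images.Add("kubernetes:v1.19.8")
	images.Add("mysql:8.0")
	root.Add("plugins")

	// Print renders the tree with box-drawing connectors, like tree(1).
	fmt.Println(root.Print())
}
```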
diff --git a/vendor/github.com/disiqueira/gotree/v3/_config.yml b/vendor/github.com/disiqueira/gotree/v3/_config.yml
new file mode 100644
index 00000000000..c7418817439
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-slate
\ No newline at end of file
diff --git a/vendor/github.com/disiqueira/gotree/v3/go.mod b/vendor/github.com/disiqueira/gotree/v3/go.mod
new file mode 100644
index 00000000000..7e17c637e45
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/go.mod
@@ -0,0 +1,3 @@
+module github.com/disiqueira/gotree/v3
+
+go 1.13
diff --git a/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png b/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..1735c6008d6f1f4a92944dcf32337b33534ec27e
GIT binary patch
literal 24183
[base85-encoded PNG data omitted: 24183 bytes]
0 {
+		spacesChild := append(spaces, last)
+		result += p.printItems(f.Items(), spacesChild)
+	}
+	}
+	return result
+}
diff --git a/vendor/github.com/docker/distribution/.golangci.yml b/vendor/github.com/docker/distribution/.golangci.yml
new file mode 100644
index 00000000000..1ba6cb91623
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.golangci.yml
@@ -0,0 +1,20 @@
+linters:
+  enable:
+    - structcheck
+    - varcheck
+    - staticcheck
+    - unconvert
+    - gofmt
+    - goimports
+    - golint
+    - ineffassign
+    - vet
+    - unused
+    - misspell
+  disable:
+    - errcheck
+
+run:
+  deadline: 2m
+  skip-dirs:
+    - vendor
diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json
deleted file mode 100644
index 9df5b14bcb2..00000000000
--- a/vendor/github.com/docker/distribution/.gometalinter.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "Vendor": true,
-  "Deadline": "2m",
-  "Sort": ["linter", "severity", "path", "line"],
-  "EnableGC": true,
-  "Enable": [
-    "structcheck",
-    "staticcheck",
-    "unconvert",
-
-    "gofmt",
-    "goimports",
-    "golint",
-    "vet"
-  ]
-}
diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap
index 0f48321d497..8f3738f3d0e 100644
--- a/vendor/github.com/docker/distribution/.mailmap
+++ b/vendor/github.com/docker/distribution/.mailmap
@@ -30,3 +30,20 @@ Helen Xie Helen-xie
 Mike Brown Mike Brown
 Manish Tomar Manish Tomar
 Sakeven Jiang sakeven
+Milos Gajdos Milos Gajdos
+Derek McGowan Derek McGowa
+Adrian Plata Adrian Plata <@users.noreply.github.com>
+Sebastiaan van Stijn Sebastiaan van Stijn
+Vishesh Jindal Vishesh Jindal
+Wang Yan Wang Yan
+Chris Patterson Chris Patterson
+Eohyung Lee Eohyung Lee
+João Pereira <484633+joaodrp@users.noreply.github.com>
+Smasherr Smasherr
+Thomas Berger Thomas Berger
+Samuel Karp Samuel Karp
+Justin Cormack
+sayboras
+CrazyMax
+CrazyMax <1951866+crazy-max@users.noreply.github.com>
+CrazyMax
diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml
deleted file mode 100644
index 44ced60451d..00000000000
--- a/vendor/github.com/docker/distribution/.travis.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-dist: trusty
-sudo: required
-# setup travis so that we can run containers for integration tests
-services:
-  - docker
-
-language: go
-
-go:
-  - "1.11.x"
-
-go_import_path: github.com/docker/distribution
-
-addons:
-  apt:
-    packages:
-      - python-minimal
-
-
-env:
-  - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
-
-before_install:
-  - uname -r
-  - sudo apt-get -q update
-
-install:
-  - go get -u github.com/vbatts/git-validation
-  # TODO: Add enforcement of license
-  # - go get -u github.com/kunalkushwaha/ltag
-  - cd $TRAVIS_BUILD_DIR
-
-script:
-  - export GOOS=$TRAVIS_GOOS
-  - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
-  - DCO_VERBOSITY=-q script/validate/dco
-  - GOOS=linux script/setup/install-dev-tools
-  - script/validate/vendor
-  - go build -i .
-  - make check
-  - make build
-  - make binaries
-  # Currently takes too long
-  #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
-  - if [ "$GOOS" = "linux" ]; then make coverage ; fi
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash) -F linux
-
-before_deploy:
-  # Run tests with storage driver configurations
diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile
index 9537817ca3a..ae8c040c735 100644
--- a/vendor/github.com/docker/distribution/Dockerfile
+++ b/vendor/github.com/docker/distribution/Dockerfile
@@ -1,22 +1,49 @@
-FROM golang:1.11-alpine AS build
+# syntax=docker/dockerfile:1.3
 
-ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
-ENV BUILDTAGS include_oss include_gcs
+ARG GO_VERSION=1.16.15
+ARG GORELEASER_XX_VERSION=1.2.5
 
-ARG GOOS=linux
-ARG GOARCH=amd64
-ARG GOARM=6
+FROM --platform=$BUILDPLATFORM crazymax/goreleaser-xx:${GORELEASER_XX_VERSION} AS goreleaser-xx
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base
+COPY --from=goreleaser-xx / /
+RUN apk add --no-cache file git
+WORKDIR /go/src/github.com/docker/distribution
 
-RUN set -ex \
-    && apk add --no-cache make git file
+FROM base AS build
+ENV GO111MODULE=auto
+ENV CGO_ENABLED=0
+# GIT_REF is used by goreleaser-xx to handle the proper git ref when available.
+# It will fall back to the working tree info if empty and use "git tag --points-at"
+# or "git describe" to define the version info.
+ARG GIT_REF
+ARG TARGETPLATFORM
+ARG PKG="github.com/distribution/distribution"
+ARG BUILDTAGS="include_oss include_gcs"
+RUN --mount=type=bind,rw \
+  --mount=type=cache,target=/root/.cache/go-build \
+  --mount=target=/go/pkg/mod,type=cache \
+  goreleaser-xx --debug \
+    --name="registry" \
+    --dist="/out" \
+    --main="./cmd/registry" \
+    --flags="-v" \
+    --ldflags="-s -w -X '$PKG/version.Version={{.Version}}' -X '$PKG/version.Revision={{.Commit}}' -X '$PKG/version.Package=$PKG'" \
+    --tags="$BUILDTAGS" \
+    --files="LICENSE" \
+    --files="README.md"
 
-WORKDIR $DISTRIBUTION_DIR
-COPY . $DISTRIBUTION_DIR
-RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked"
+FROM scratch AS artifact
+COPY --from=build /out/*.tar.gz /
+COPY --from=build /out/*.zip /
+COPY --from=build /out/*.sha256 /
 
-FROM alpine
+FROM scratch AS binary
+COPY --from=build /usr/local/bin/registry* /
+
+FROM alpine:3.14
+RUN apk add --no-cache ca-certificates
 COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
-COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry
+COPY --from=build /usr/local/bin/registry /bin/registry
 VOLUME ["/var/lib/registry"]
 EXPOSE 5000
 ENTRYPOINT ["registry"]
diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile
index 4635c6eca87..331da27328d 100644
--- a/vendor/github.com/docker/distribution/Makefile
+++ b/vendor/github.com/docker/distribution/Makefile
@@ -50,7 +50,7 @@ version/version.go:
 
 check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
 	@echo "$(WHALE) $@"
-	gometalinter --config .gometalinter.json ./...
+ golangci-lint run test: ## run tests, except integration test with test.short @echo "$(WHALE) $@" diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index 998878850cd..e513c18e969 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -2,7 +2,7 @@ The Docker toolset to pack, ship, store, and deliver content. -This repository's main product is the Docker Registry 2.0 implementation +This repository provides the Docker Registry 2.0 implementation for storing and distributing Docker images. It supersedes the [docker/docker-registry](https://github.com/docker/docker-registry) project with a new API design, focused around security and performance. diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index c0e9261be93..2a659eaa368 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) var ( diff --git a/vendor/github.com/docker/distribution/docker-bake.hcl b/vendor/github.com/docker/distribution/docker-bake.hcl new file mode 100644 index 00000000000..4dd5a100c1d --- /dev/null +++ b/vendor/github.com/docker/distribution/docker-bake.hcl @@ -0,0 +1,65 @@ +// GITHUB_REF is the actual ref that triggers the workflow +// https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables +variable "GITHUB_REF" { + default = "" +} + +target "_common" { + args = { + GIT_REF = GITHUB_REF + } +} + +group "default" { + targets = ["image-local"] +} + +// Special target: https://github.com/docker/metadata-action#bake-definition +target "docker-metadata-action" { + tags = ["registry:local"] +} + +target "binary" { + inherits = ["_common"] + target = "binary" + output = ["./bin"] +} + +target "artifact" { + inherits = ["_common"] + target = "artifact" + output = ["./bin"] +} + +target "artifact-all" { + inherits = ["artifact"] + platforms = [ + "linux/amd64", + "linux/arm/v6", + "linux/arm/v7", + "linux/arm64", + "linux/ppc64le", + "linux/s390x" + ] +} + +target "image" { + inherits = ["_common", "docker-metadata-action"] +} + +target "image-local" { + inherits = ["image"] + output = ["type=docker"] +} + +target "image-all" { + inherits = ["image"] + platforms = [ + "linux/amd64", + "linux/arm/v6", + "linux/arm/v7", + "linux/arm64", + "linux/ppc64le", + "linux/s390x" + ] +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go index ee29438fead..41f48029201 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -106,7 +106,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) { // UnmarshalJSON populates a new Manifest struct from JSON data. 
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
-	m.canonical = make([]byte, len(b), len(b))
+	m.canonical = make([]byte, len(b))
 	// store manifest in canonical
 	copy(m.canonical, b)
 
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
index 1816baea1d6..8f84a220a97 100644
--- a/vendor/github.com/docker/distribution/manifests.go
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) {
 // UnmarshalFunc implements manifest unmarshalling a given MediaType
 type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
 
-var mappings = make(map[string]UnmarshalFunc, 0)
+var mappings = make(map[string]UnmarshalFunc)
 
 // UnmarshalManifest looks up manifest unmarshal functions based on
 // MediaType
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
index 2d71fc5e9ff..b3dfb7a6d7e 100644
--- a/vendor/github.com/docker/distribution/reference/normalize.go
+++ b/vendor/github.com/docker/distribution/reference/normalize.go
@@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) {
 	return named, nil
 }
 
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both
+// a tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
 // splitDockerDomain splits a repository name to domain and remotename string.
 // If no valid domain is found, the default domain is used. Repository name
 // needs to be already validated before.
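`ParseDockerRef` is the entry point callers use when they need a reference that is guaranteed to come back either tagged or digested. A minimal sketch of the documented behavior; the digest and both expected outputs are taken from the doc comment above, not from running against a real registry:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Tagged and digested: the digest wins and the tag is dropped.
	named, err := reference.ParseDockerRef(
		"docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

	// Bare name: normalized, then TagNameOnly adds the :latest tag.
	named, err = reference.ParseDockerRef("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/busybox:latest
}
```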
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87a3..8c0c23b2fe1 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go index 6d9bb4b62af..4c35b879afd 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) { for _, daErr := range errs { var err Error - switch daErr.(type) { + switch daErr := daErr.(type) { case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) + err = daErr.WithDetail(nil) case Error: - err = daErr.(Error) + err = daErr default: err = ErrorCodeUnknown.WithDetail(daErr) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 1337bdb1276..3c3ec989330 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { u.RawQuery = merged.Encode() return u } - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. 
-func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go index 6e3f1ccc410..fe238210cd7 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -117,8 +117,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar && !isCtl && !isSeparator { diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index aa442e65406..3e2ae66d3cf 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -16,7 +16,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" + v2 "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" @@ -736,7 +736,12 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO return nil, err } - resp, err := bs.client.Post(u, "", nil) + req, err := http.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + resp, err := bs.client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf index a249caf269d..bd1b4bff61c 100644 --- a/vendor/github.com/docker/distribution/vendor.conf +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -8,7 +8,7 @@ github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 @@ -48,4 +48,4 @@ gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 v2.2.1 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 +github.com/opencontainers/image-spec 
67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2
diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE
new file mode 100644
index 00000000000..1ea555e2af0
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 00000000000..d1d0434cb55
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks whether 'msg' is a known invalid-credentials error message.
+// It returns nil when the message is not one of them, and the matching error otherwise;
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+	if credentials.IsCredentialsMissingServerURLMessage(msg) {
+		return credentials.NewErrCredentialsMissingServerURL()
+	}
+
+	if credentials.IsCredentialsMissingUsernameMessage(msg) {
+		return credentials.NewErrCredentialsMissingUsername()
+	}
+
+	return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+	cmd := program("store")
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program("get") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. +func Erase(program ProgramFunc, serverURL string) error { + cmd := program("erase") + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. +func List(program ProgramFunc) (map[string]string, error) { + cmd := program("list") + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 00000000000..0183c063939 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,57 @@ +package client + +import ( + "fmt" + "io" + "os" + + exec "golang.org/x/sys/execabs" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. +func NewShellProgramFunc(name string) ProgramFunc { + return NewShellProgramFuncWithEnv(name, nil) +} + +// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables +func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + } +} + +func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { + programCmd := exec.Command(commandName, args...) + programCmd.Env = os.Environ() + if env != nil { + for k, v := range *env { + programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + programCmd.Stderr = os.Stderr + return programCmd +} + +// Shell invokes shell commands to talk with a remote credentials helper. +type Shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. 
+func (s *Shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 00000000000..da8b594e7f8
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of Credentials object such that no credentials lack
+// a server URL or a username.
+// It returns whether the credentials are valid and the error if it isn't.
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling.
+// That label allows filtering out non-Docker credentials too at lookup/search in macOS keychain,
+// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials"
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	var err error
+	if len(os.Args) != 2 {
+		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+	}
+
+	if err == nil {
+		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stdout, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+	switch key {
+	case "store":
+		return Store(helper, in)
+	case "get":
+		return Get(helper, in, out)
+	case "erase":
+		return Erase(helper, in)
+	case "list":
+		return List(helper, out)
+	case "version":
+		return PrintVersion(out)
+	}
+	return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server URL.
+// The reader must contain the server URL to search. +// The writer is used to write the JSON serialization of the credentials. +func Get(helper Helper, reader io.Reader, writer io.Writer) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + username, secret, err := helper.Get(serverURL) + if err != nil { + return err + } + + resp := Credentials{ + ServerURL: serverURL, + Username: username, + Secret: secret, + } + + buffer.Reset() + if err := json.NewEncoder(buffer).Encode(resp); err != nil { + return err + } + + fmt.Fprint(writer, buffer.String()) + return nil +} + +// Erase removes credentials from the store. +// The reader must contain the server URL to remove. +func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +//List returns all the serverURLs of keys in +//the OS store as a list of strings +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +//PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + fmt.Fprintln(writer, Version) + return nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 00000000000..fe6a5aef45c --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,102 @@ +package credentials + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly. + errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. +func IsErrCredentialsNotFound(err error) bool { + _, ok := err.(errCredentialsNotFound) + return ok +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. 
+// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + _, ok := err.(errCredentialsMissingServerURL) + return ok +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + _, ok := err.(errCredentialsMissingUsername) + return ok +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. +func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 00000000000..135acd254d7 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. 
+ List() (map[string]string, error) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 00000000000..185e367961a --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,4 @@ +package credentials + +// Version holds a string describing the current version +const Version = "0.6.4" diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 2ae76d2c2c9..dffacff1120 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -12,12 +12,11 @@ Aaron Welch Aaron.L.Xu Abel Muiño Abhijeet Kasurde -Abhinandan Prativadi +Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda Abhishek Sharma Abin Shahab -Ada Mancini Adam Avilla Adam Dobrawy Adam Eijdenberg @@ -27,7 +26,6 @@ Adam Mills Adam Pointer Adam Singer Adam Walz -Adam Williams Addam Hardy Aditi Rajagopal Aditya @@ -53,7 +51,6 @@ Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle -Akshay Moghe Al Tobey alambike Alan Hoyle @@ -62,7 +59,6 @@ Alan Thompson Albert Callarisa Albert Zhang Albin Kerouanton -Alec Benson Alejandro González Hevia Aleksa Sarai Aleksandrs Fadins @@ -84,7 +80,6 @@ Alexander Boyd Alexander Larsson Alexander Midlash Alexander Morozov -Alexander Polakov Alexander Shopov Alexandre Beslic Alexandre Garnier @@ -95,8 +90,7 @@ Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin -Alexis Ries -Alexis Thomas +Alexis THOMAS Alfred Landrum Ali Dehghani Alicia Lauerman @@ -109,7 +103,6 @@ Alvin Deng Alvin Richards amangoel Amen Belayneh -Ameya Gawde Amir Goldstein Amit Bakshi Amit Krishnan @@ -133,7 +126,6 @@ Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan -Andrei Ushakov Andrei Vagin Andrew C. 
Bodine Andrew Clay Shafer @@ -143,7 +135,6 @@ Andrew Gerrand Andrew Guenther Andrew He Andrew Hsu -Andrew Kim Andrew Kuklewicz Andrew Macgregor Andrew Macpherson @@ -164,12 +155,10 @@ Andy Chambers andy diller Andy Goldstein Andy Kipp -Andy Lindeman Andy Rothfusz Andy Smith Andy Wilson Anes Hasicic -Angel Velazquez Anil Belur Anil Madhavapeddy Ankit Jain @@ -190,7 +179,6 @@ Antonio Murdaca Antonis Kalipetis Antony Messerli Anuj Bahuguna -Anuj Varma Anusha Ragunathan apocas Arash Deshmeh @@ -212,14 +200,12 @@ Avi Miller Avi Vaid ayoshitake Azat Khuyiyakhmetov -Bao Yonglei Bardia Keyoumarsi Barnaby Gray Barry Allard Bartłomiej Piotrowski Bastiaan Bakker bdevloed -Bearice Ren Ben Bonnefoy Ben Firshman Ben Golub @@ -232,7 +218,6 @@ Ben Wiklund Benjamin Atkin Benjamin Baker Benjamin Boudreau -Benjamin Böhmke Benjamin Yolken Benny Ng Benoit Chesneau @@ -246,7 +231,6 @@ Bhiraj Butala Bhumika Bayani Bilal Amarni Bill Wang -Billy Ridgway Bily Zhang Bin Liu Bingshen Wang @@ -268,7 +252,6 @@ Brendan Dixon Brent Salisbury Brett Higgins Brett Kochendorfer -Brett Milford Brett Randall Brian (bex) Exelbierd Brian Bland @@ -299,7 +282,6 @@ Byung Kang Caleb Spare Calen Pennington Cameron Boehmer -Cameron Sparr Cameron Spear Campbell Allen Candid Dauth @@ -357,7 +339,6 @@ Chris Fordham Chris Gavin Chris Gibson Chris Khoo -Chris Kreussling (Flatbush Gardener) Chris McKinnel Chris McKinnel Chris Price @@ -370,7 +351,6 @@ Chris Telfer Chris Wahl Chris Weyl Chris White -Christian Becker Christian Berendt Christian Brauner Christian Böhme @@ -379,7 +359,6 @@ Christian Persson Christian Rotzoll Christian Simon Christian Stefanescu -Christoph Ziebuhr Christophe Mehay Christophe Troestler Christophe Vidal @@ -393,9 +372,7 @@ Christy Norman Chun Chen Ciro S. Costa Clayton Coleman -Clint Armstrong Clinton Kitson -clubby789 Cody Roseborough Coenraad Loubser Colin Dunklau @@ -417,7 +394,6 @@ cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian CUI Wei -Cuong Manh Le Cyprian Gracz Cyril F Daan van Berkel @@ -480,7 +456,6 @@ Dave Henderson Dave MacDonald Dave Tucker David Anderson -David Bellotti David Calavera David Chung David Corking @@ -495,11 +470,9 @@ David Lawrence David Lechner David M. Karr David Mackey -David Manouchehri David Mat David Mcanulty David McKay -David O'Rourke David P Hilton David Pelaez David R. Jenni @@ -538,7 +511,6 @@ Devon Estes Devvyn Murphy Dharmit Shah Dhawal Yogesh Bhanushali -Dhilip Kumars Diego Romero Diego Siqueira Dieter Reuter @@ -564,8 +536,6 @@ Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev -Dmytro Iakovliev -docker-unir[bot] Dolph Mathews Dominic Tubach Dominic Yin @@ -599,7 +569,7 @@ Eivind Uggedal Elan Ruusamäe Elango Sivanandam Elena Morozova -Eli Uriegas +Eli Uriegas Elias Faxö Elias Probst Elijah Zupancic @@ -618,7 +588,6 @@ Eric Curtin Eric G. Noriega Eric Hanchrow Eric Lee -Eric Mountain Eric Myhre Eric Paris Eric Rafaloff @@ -628,7 +597,6 @@ Eric Soderstrom Eric Yang Eric-Olivier Lamey Erica Windisch -Erich Cordoba Erik Bray Erik Dubbelboer Erik Hollensbe @@ -638,10 +606,8 @@ Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh -Espen Suenson Ethan Bell Ethan Mosbaugh -Euan Harris Euan Kemp Eugen Krizo Eugene Yakubovich @@ -707,7 +673,6 @@ Florin Patan fonglh Foysal Iqbal Francesc Campoy -Francesco Degrassi Francesco Mari Francis Chuang Francisco Carriedo @@ -716,10 +681,9 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -Frank Yang +frankyang Fred Lifton Frederick F. Kautz IV -Frederico F. 
de Oliveira Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter @@ -729,7 +693,6 @@ Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy -Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda @@ -744,7 +707,6 @@ Gaurav Singh Gaël PORTAY Genki Takiuchi GennadySpb -Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze @@ -784,7 +746,6 @@ Guilhem Lettron Guilherme Salgado Guillaume Dufour Guillaume J. Charmes -Gunadhya S. <6939749+gunadhya@users.noreply.github.com> guoxiuyan Guri Gurjeet Singh @@ -831,10 +792,8 @@ Hu Tao HuanHuan Ye Huanzhong Zhang Huayi Zhang -Hugo Barrera Hugo Duncan Hugo Marisco <0x6875676f@gmail.com> -Hui Kang Hunter Blanks huqun Huu Nguyen @@ -888,7 +847,7 @@ Jaivish Kothari Jake Champlin Jake Moshenko Jake Sanders -Jakub Drahos +jakedt James Allen James Carey James Carr @@ -953,14 +912,12 @@ Jeff Minard Jeff Nickoloff Jeff Silberman Jeff Welch -Jeff Zvier Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske Jeremy Chambers Jeremy Grosser -Jeremy Huntwork Jeremy Price Jeremy Qian Jeremy Unruh @@ -976,16 +933,13 @@ Ji.Zhilong Jian Liao Jian Zhang Jiang Jinyang -Jianyong Wu Jie Luo Jie Ma Jihyun Hwang Jilles Oldenbeuving Jim Alateras -Jim Carroll Jim Ehrismann Jim Galasyn -Jim Lin Jim Minter Jim Perrin Jimmy Cuadra @@ -997,7 +951,6 @@ Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka -Joakim Roubert Joao Fernandes Joao Trindade Joe Beda @@ -1059,7 +1012,6 @@ Joost Cassee Jordan Arentsen Jordan Jennings Jordan Sissel -Jordi Massaguer Pla Jorge Marin Jorit Kleine-Möllhoff Jose Diaz-Gonzalez @@ -1092,9 +1044,7 @@ Julien Pervillé Julien Pivotto Julio Guerra Julio Montes -Jun Du Jun-Ru Chang -junxu Jussi Nummelin Justas Brazauskas Justen Martin @@ -1112,7 +1062,6 @@ Jörg Thalheim K. Heller Kai Blin Kai Qiang Wu (Kennan) -Kaijie Chen Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1133,7 +1082,6 @@ Kawsar Saiyeed Kay Yan kayrus Kazuhiro Sera -Kazuyoshi Kato Ke Li Ke Xu Kei Ohmura @@ -1177,7 +1125,6 @@ Konrad Kleine Konstantin Gribov Konstantin L Konstantin Pelykh -Kostadin Plachkov Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister @@ -1189,7 +1136,6 @@ Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden -Kyle Squizzato Kyle Wuolle kyu Lachlan Coote @@ -1205,26 +1151,20 @@ Lars R. 
Damerow Lars-Magnus Skog Laszlo Meszaros Laura Frank -Laurent Bernaille Laurent Erignoux Laurie Voss Leandro Siqueira -Lee Calcote Lee Chao <932819864@qq.com> Lee, Meng-Han leeplay Lei Gong Lei Jitang -Leiiwang Len Weincier Lennie Leo Gallucci -Leonardo Nodari -Leonardo Taccari Leszek Kowalski Levi Blackstone Levi Gross -Levi Harrison Lewis Daly Lewis Marshall Lewis Peckover @@ -1233,12 +1173,10 @@ Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh -liangwei Liao Qingwei Lifubang Lihua Tang Lily Guo -limeidan limsy Lin Lu LingFaKe @@ -1284,7 +1222,7 @@ Ma Shimiao Mabin Madhan Raj Mookkandy Madhav Puri -Madhu Venugopal +Madhu Venugopal Mageee Mahesh Tiyyagura malnick @@ -1323,7 +1261,6 @@ Mark McKinstry Mark Milstein Mark Oates Mark Parker -Mark Vainomaa Mark West Markan Patel Marko Mikulicic @@ -1337,7 +1274,6 @@ Martin Kelly Martin Mosegaard Amdisen Martin Muzatko Martin Redmond -Maru Newby Mary Anthony Masahito Zembutsu Masato Ohba @@ -1350,7 +1286,6 @@ Mathieu Le Marec - Pasquet Mathieu Parent Matt Apperson Matt Bachmann -Matt Bajor Matt Bentley Matt Haggard Matt Hoyle @@ -1370,14 +1305,12 @@ Matthew Riley Matthias Klumpp Matthias Kühnle Matthias Rampke -Matthieu Fronton Matthieu Hauglustaine Mattias Jernberg Mauricio Garavaglia mauriyouth Max Harmathy Max Shytikov -Max Timchenko Maxim Fedchyshyn Maxim Ivanov Maxim Kulkin @@ -1391,12 +1324,10 @@ Megan Kostick Mehul Kar Mei ChunTao Mengdi Gao -Menghui Chen Mert Yazıcıoğlu mgniu Micah Zoltu Michael A. Smith -Michael Beskin Michael Bridgen Michael Brown Michael Chiang @@ -1423,23 +1354,18 @@ Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala -Michal Kostrzewa Michal Minář -Michal Rostecki Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz Michał Gryko -Michał Kosek Michiel de Jong Mickaël Fortunato Mickaël Remars Miguel Angel Fernández Miguel Morales -Miguel Perez Mihai Borobocea Mihuleacc Sergiu -Mikael Davranche Mike Brown Mike Bush Mike Casas @@ -1466,7 +1392,7 @@ Misty Stanley-Jones Mitch Capper Mizuki Urushida mlarcher -Mohammad Banikazemi +Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni @@ -1480,7 +1406,6 @@ Moysés Borges mrfly Mrunal Patel Muayyad Alsadi -Muhammad Zohaib Aslam Mustafa Akın Muthukumar R Máximo Cuadros @@ -1497,8 +1422,6 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones -Nathan Carlson -Nathan Herald Nathan Hsieh Nathan Kleyn Nathan LeClaire @@ -1522,7 +1445,6 @@ Nick Payne Nick Russo Nick Stenning Nick Stinemates -Nick Wood NickrenREN Nicola Kabar Nicolas Borboën @@ -1550,7 +1472,6 @@ noducks Nolan Darilek Noriki Nakamura nponeccop -Nurahmadie Nuutti Kotivuori nzwsch O.S. Tezer @@ -1568,9 +1489,7 @@ Olle Jonsson Olli Janatuinen Olly Pomeroy Omri Shiv -Onur Filiz Oriol Francès -Oscar Bonilla <6f6231@gmail.com> Oskar Niburski Otto Kekäläinen Ouyang Liduo @@ -1583,12 +1502,10 @@ Pascal Borreli Pascal Hartig Patrick Böänziger Patrick Devine -Patrick Haas Patrick Hemmer Patrick Stapleton Patrik Cyvoct pattichen -Paul "TBBle" Hampson Paul paul Paul Annesley @@ -1603,7 +1520,6 @@ Paul Liljenberg Paul Morie Paul Nasrat Paul Weaver -Paulo Gomes Paulo Ribeiro Pavel Lobashov Pavel Matěja @@ -1636,7 +1552,6 @@ Peter Salvatore Peter Volpe Peter Waller Petr Švihlík -Petros Angelatos Phil Phil Estes Phil Spitler @@ -1655,25 +1570,21 @@ Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan -Piotr Karbowski +pixelistik Porjo Poul Kjeldager Sørensen Pradeep Chhetri Pradip Dhara -Pradipta Kr. 
Banerjee Prasanna Gautam Pratik Karki Prayag Verma Priya Wadhwa Projjol Banerji Przemek Hejman -Puneet Pruthi Pure White pysqz Qiang Huang -Qin TianHuan Qinglan Peng -Quan Tian qudongfang Quentin Brossard Quentin Perez @@ -1696,7 +1607,6 @@ Ramon van Alteren RaviTeja Pothana Ray Tsang ReadmeCritic -realityone Recursive Madman Reficul Regan McCooey @@ -1724,7 +1634,6 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy -Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich Rob Vesse Robert Bachmann @@ -1762,7 +1671,6 @@ Ron Williams Rong Gao Rong Zhang Rongxiang Song -Rony Weng root root root @@ -1783,7 +1691,6 @@ Ryan Abrams Ryan Anderson Ryan Aslett Ryan Belgrave -Ryan Campbell Ryan Detzel Ryan Fowler Ryan Liu @@ -1799,7 +1706,6 @@ Ryan Zhang ryancooper7 RyanDeng Ryo Nakao -Ryoga Saito Rémy Greinhofer s. rannou s00318865 @@ -1839,7 +1745,6 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston -Scott Percival Scott Stamp Scott Walls sdreyesg @@ -1852,8 +1757,6 @@ Sean P. Kane Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn -Sebastian Radloff -Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran SeongJae Park @@ -1873,12 +1776,10 @@ shaunol Shawn Landden Shawn Siefkas shawnhe -Shayan Pooya Shayne Wang Shekhar Gulati Sheng Yang Shengbo Song -Shengjing Zhu Shev Yan Shih-Yuan Lee Shijiang Wei @@ -1891,7 +1792,6 @@ shuai-z Shukui Yang Shuwei Hao Sian Lerk Lau -Siarhei Rasiukevich Sidhartha Mani sidharthamani Silas Sewell @@ -1908,16 +1808,13 @@ Simon Vikstrom Sindhu S Sjoerd Langkemper skanehira -Smark Meng Solganik Alexander Solomon Hykes Song Gao Soshi Katsuta -Sotiris Salloumis Soulou Spencer Brown Spencer Smith -Spike Curtis Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu @@ -1952,7 +1849,6 @@ Steven Merrill Steven Richards Steven Taylor Stig Larsson -Su Wang Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> @@ -1981,19 +1877,16 @@ Ted M. Young Tehmasp Chaudhri Tejaswini Duggaraju Tejesh Mehta -Terry Chu terryding77 <550147740@qq.com> tgic Thatcher Peskens theadactyl Thell 'Bo' Fowler Thermionix -Thiago Alves Silva Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Gazagnaire -Thomas Graf Thomas Grainger Thomas Hansen Thomas Leonard @@ -2011,7 +1904,6 @@ Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low -Till Claassen Till Wegmüller Tim Tim Bart @@ -2023,14 +1915,11 @@ Tim Potter Tim Ruffles Tim Smith Tim Terhorst -Tim Wagner Tim Wang Tim Waugh Tim Wraight Tim Zju <21651152@zju.edu.cn> -timchenxiaoyu <837829664@qq.com> timfeirg -Timo Rothenpieler Timothy Hobbs tjwebb123 tobe @@ -2039,7 +1928,6 @@ Tobias Bradtke Tobias Gesellchen Tobias Klauser Tobias Munk -Tobias Pfandzelter Tobias Schmidt Tobias Schwab Todd Crane @@ -2053,19 +1941,14 @@ Tom Fotherby Tom Howe Tom Hulihan Tom Maaswinkel -Tom Parker Tom Sweeney Tom Wilkie Tom X. 
Tobin -Tom Zhao -Tomas Janousek -Tomas Kral Tomas Tomecek Tomasz Kopczynski Tomasz Lipinski Tomasz Nurkiewicz Tommaso Visconti -Tomoya Tabuchi Tomáš Hrčka Tonny Xu Tony Abboud @@ -2073,11 +1956,10 @@ Tony Daws Tony Miller toogley Torstein Husebø -Toshiaki Makita Tõnis Tiigi Trace Andreason tracylihui <793912329@qq.com> -Trapier Marshall +Trapier Marshall Travis Cline Travis Thieman Trent Ogren @@ -2087,7 +1969,6 @@ Trevor Sullivan Trishna Guha Tristan Carel Troy Denton -Ty Alexander Tycho Andersen Tyler Brock Tyler Brown @@ -2098,7 +1979,6 @@ Umesh Yadav Utz Bacher vagrant Vaidas Jablonskis -Valentin Kulesh vanderliang Velko Ivanov Veres Lajos @@ -2112,7 +1992,6 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K -Vikas Choudhary Vikram bir Singh Viktor Stanchev Viktor Vojnovski @@ -2138,7 +2017,6 @@ Vladimir Pouzanov Vladimir Rutsky Vladimir Varankin VladimirAus -Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) waitingkuo @@ -2156,7 +2034,6 @@ wanghuaiqing Ward Vandewege WarheadsSE Wassim Dhif -Wataru Ishida Wayne Chang Wayne Song Weerasak Chongnguluam @@ -2191,21 +2068,16 @@ William Thurston Wilson Júnior Wing-Kam Wong WiseTrem -Wolfgang Nagele Wolfgang Powisch Wonjun Kim -WuLonghui xamyzhao Xian Chaobo Xianglin Gao -Xianjie Xianlu Bird Xiao YongBiao -Xiao Zhang XiaoBing Jiang Xiaodong Liu Xiaodong Zhang -Xiaohua Ding Xiaoxi He Xiaoxu Chen Xiaoyu Zhang @@ -2223,9 +2095,7 @@ Yahya YAMADA Tsuyoshi Yamasaki Masahide Yan Feng -Yan Zhu Yang Bai -Yang Li Yang Pengfei yangchenliang Yanqiang Miao @@ -2256,15 +2126,12 @@ Yuanhong Peng Yue Zhang Yuhao Fang Yuichiro Kaneko -YujiOshima Yunxiang Huang Yurii Rashkovskii Yusuf Tarık Günaydın -Yves Blusseau <90z7oey02@sneakemail.com> Yves Junqueira Zac Dover Zach Borboa -Zach Gershman Zachary Jaffee Zain Memon Zaiste! @@ -2280,7 +2147,6 @@ Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo Zhenhai Gao Zhenkun Bi -ZhiPeng Lu zhipengzuo Zhou Hao Zhoulin Xie @@ -2307,4 +2173,3 @@ Zunayed Ali 慕陶 搏通 黄艳红00139573 -정재영 diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index bee9b875a88..1565e2af647 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.42" + DefaultVersion = "1.41" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index b8b3ea09465..0bbe7470016 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.42" +basePath: "/v1.41" info: title: "Docker Engine API" - version: "1.42" + version: "1.41" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.42) is used. - For example, calling `/info` is the same as calling `/v1.42/info`. Using the + If you omit the version-prefix, the current version of the API (v1.41) is used. + For example, calling `/info` is the same as calling `/v1.41/info`. 
Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -382,13 +382,11 @@ definitions: type: "string" description: | - Empty string means not to restart - - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - - "no" - "always" - "unless-stopped" - "on-failure" @@ -746,7 +744,6 @@ definitions: description: | Health stores information about the container's healthcheck results. type: "object" - x-nullable: true properties: Status: description: | @@ -772,13 +769,13 @@ definitions: description: | Log contains the last few results (oldest first) items: + x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" - x-nullable: true properties: Start: description: | @@ -1277,7 +1274,7 @@ definitions: type: "object" properties: Bridge: - description: Name of the network's bridge (for example, `docker0`). + description: Name of the network'a bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: @@ -2191,25 +2188,6 @@ definitions: type: "string" x-nullable: false - PluginPrivilege: - description: | - Describes a permission the user has to accept upon installing - the plugin. - type: "object" - x-go-name: "PluginPrivilege" - properties: - Name: - type: "string" - example: "network" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - "host" - Plugin: description: "A plugin for the Engine API" type: "object" @@ -2992,7 +2970,19 @@ definitions: PluginPrivilege: type: "array" items: - $ref: "#/definitions/PluginPrivilege" + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" ContainerSpec: type: "object" description: | @@ -3357,7 +3347,7 @@ definitions: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" - Reservation: + Reservations: description: "Define resources reservation." 
$ref: "#/definitions/ResourceObject" RestartPolicy: @@ -4032,71 +4022,73 @@ definitions: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/Mount" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." 
@@ -4218,7 +4210,6 @@ definitions: ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" - x-nullable: true properties: Status: description: | @@ -4276,6 +4267,7 @@ definitions: type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: + x-nullable: true $ref: "#/definitions/Health" SystemVersion: @@ -4374,6 +4366,7 @@ definitions: type: "string" example: "2020-06-22T15:49:27.000000000+00:00" + SystemInfo: type: "object" properties: @@ -5206,158 +5199,6 @@ definitions: additionalProperties: type: "string" - EventActor: - description: | - Actor describes something that generates events, like a container, network, - or a volume. - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - description: | - Various key/value attributes of the object, depending on its type. - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-label-value" - image: "alpine:latest" - name: "my-container" - - EventMessage: - description: | - EventMessage represents the information an event contains. - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] - example: "container" - Action: - description: "The type of event" - type: "string" - example: "create" - Actor: - $ref: "#/definitions/EventActor" - scope: - description: | - Scope of the event. Engine events are `local` scope. Cluster (Swarm) - events are `swarm` scope. - type: "string" - enum: ["local", "swarm"] - time: - description: "Timestamp of event" - type: "integer" - format: "int64" - example: 1629574695 - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - example: 1629574695515050031 - - OCIDescriptor: - type: "object" - x-go-name: Descriptor - description: | - A descriptor struct containing digest, media type, and size, as defined in - the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). - properties: - mediaType: - description: | - The media type of the object this schema refers to. - type: "string" - example: "application/vnd.docker.distribution.manifest.v2+json" - digest: - description: | - The digest of the targeted content. - type: "string" - example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - size: - description: | - The size in bytes of the blob. - type: "integer" - format: "int64" - example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. 
- # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" - - OCIPlatform: - type: "object" - x-go-name: Platform - description: | - Describes the platform which the image in the manifest runs on, as defined - in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). - properties: - architecture: - description: | - The CPU architecture, for example `amd64` or `ppc64`. - type: "string" - example: "arm" - os: - description: | - The operating system, for example `linux` or `windows`. - type: "string" - example: "windows" - os.version: - description: | - Optional field specifying the operating system version, for example on - Windows `10.0.19041.1165`. - type: "string" - example: "10.0.19041.1165" - os.features: - description: | - Optional field specifying an array of strings, each listing a required - OS feature (for example on Windows `win32k`). - type: "array" - items: - type: "string" - example: - - "win32k" - variant: - description: | - Optional field specifying a variant of the CPU, for example `v7` to - specify ARMv7 when architecture is `arm`. - type: "string" - example: "v7" - - DistributionInspect: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - description: | - Describes the result obtained from contacting the registry to retrieve - image metadata. - properties: - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. - items: - $ref: "#/definitions/OCIPlatform" - paths: /containers/json: get: @@ -5420,9 +5261,7 @@ paths: 200: description: "no error" schema: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" + $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" @@ -5788,6 +5627,7 @@ paths: items: type: "string" State: + x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" @@ -5915,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -7362,11 +7201,6 @@ paths: - `reference`=(`[:]`) - `since`=(`[:]`, `` or ``) type: "string" - - name: "shared-size" - in: "query" - description: "Compute and show shared size as a `SharedSize` field on each image." - type: "boolean" - default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." @@ -7665,18 +7499,6 @@ paths: Refer to the [authentication section](#section/Authentication) for details. type: "string" - - name: "changes" - in: "query" - description: | - Apply `Dockerfile` instructions to the image that is created, - for example: `changes=ENV DEBUG=true`. - Note that `ENV DEBUG=true` should be URI component encoded. 
- - Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - type: "array" - items: - type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" @@ -8351,7 +8173,44 @@ paths: 200: description: "no error" schema: - $ref: "#/definitions/EventMessage" + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 400: description: "bad parameter" schema: @@ -8479,43 +8338,10 @@ paths: UsageData: Size: 10920104 RefCount: 2 - BuildCache: - - - ID: "hw53o5aio51xtltp5xjp8v7fx" - Parent: "" - Type: "regular" - Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" - InUse: false - Shared: true - Size: 0 - CreatedAt: "2021-06-28T13:31:01.474619385Z" - LastUsedAt: "2021-07-07T22:02:32.738075951Z" - UsageCount: 26 - - - ID: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: "hw53o5aio51xtltp5xjp8v7fx" - Type: "regular" - Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: false - Shared: true - Size: 51 - CreatedAt: "2021-06-28T13:31:03.002625487Z" - LastUsedAt: "2021-07-07T22:02:32.773909517Z" - UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" - parameters: - - name: "type" - in: "query" - description: | - Object types, for which to compute and return data. - type: "array" - collectionFormat: multi - items: - type: "string" - enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: @@ -8666,7 +8492,6 @@ paths: description: "Exec configuration" schema: type: "object" - title: "ExecConfig" properties: AttachStdin: type: "boolean" @@ -8757,7 +8582,6 @@ paths: in: "body" schema: type: "object" - title: "ExecStartConfig" properties: Detach: type: "boolean" @@ -8782,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" @@ -9292,7 +9124,6 @@ paths: required: true schema: type: "object" - title: "NetworkCreateRequest" required: ["Name"] properties: Name: @@ -9403,7 +9234,6 @@ paths: required: true schema: type: "object" - title: "NetworkConnectRequest" properties: Container: type: "string" @@ -9450,7 +9280,6 @@ paths: required: true schema: type: "object" - title: "NetworkDisconnectRequest" properties: Container: type: "string" @@ -9535,7 +9364,20 @@ paths: schema: type: "array" items: - $ref: "#/definitions/PluginPrivilege" + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + title: "PluginPrivilegeItem" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" example: - Name: "network" Description: "" @@ -9611,7 +9453,19 @@ paths: schema: type: "array" items: - $ref: "#/definitions/PluginPrivilege" + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" example: - Name: "network" Description: "" @@ -9783,7 +9637,19 @@ paths: schema: type: "array" items: - $ref: "#/definitions/PluginPrivilege" + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" example: - Name: "network" Description: "" @@ -10073,7 +9939,6 @@ paths: required: true schema: type: "object" - title: "SwarmInitRequest" properties: ListenAddr: description: | @@ -10172,7 +10037,6 @@ paths: required: true schema: type: "object" - title: "SwarmJoinRequest" properties: ListenAddr: description: | @@ -10193,7 +10057,7 @@ paths: description: | Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address + like `eth0`. If `DataPathAddr` is unspecified, the same addres as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope @@ -10333,7 +10197,6 @@ paths: required: true schema: type: "object" - title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." @@ -11445,7 +11308,67 @@ paths: 200: description: "descriptor and platform information" schema: - $ref: "#/definitions/DistributionInspect" + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: | + A descriptor struct containing digest, media type, and size. + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. 
+ items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" 401: description: "Failed authentication or no image found" schema: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index e3c06cef691..9c464b73e25 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -59,6 +59,7 @@ type ContainerExecInspect struct { // ContainerListOptions holds parameters to list containers with. type ContainerListOptions struct { + Quiet bool Size bool All bool Latest bool @@ -235,20 +236,10 @@ type ImageImportOptions struct { Platform string // Platform is the target platform of the image } -// ImageListOptions holds parameters to list images with. +// ImageListOptions holds parameters to filter the list of images with. type ImageListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. + All bool Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool } // ImageLoadResponse returns information to the client about a load process. 
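The hunks above pin the vendored docker/docker client back to the API 1.41 surface: DefaultVersion drops from 1.42 to 1.41, and ImageListOptions loses the 1.42-only SharedSize and ContainerCount fields, keeping only All and Filters. A minimal sketch of listing images against this downgraded client follows; the pinned version string and the use of FromEnv are illustrative assumptions for the sketch, not part of this patch:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Assumption for illustration: pin version negotiation to the vendored
	// API generation (1.41) and read connection settings from the environment.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.41"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// After the revert, ImageListOptions carries only All and Filters;
	// the 1.42-only SharedSize/ContainerCount fields no longer exist.
	images, err := cli.ImageList(context.Background(), types.ImageListOptions{All: true})
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags)
	}
}
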
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index dcea6c8a5ae..2d1cbaa9abd 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -13,26 +13,19 @@ import ( // CgroupnsMode represents the cgroup namespace mode of the container type CgroupnsMode string -// cgroup namespace modes for containers -const ( - CgroupnsModeEmpty CgroupnsMode = "" - CgroupnsModePrivate CgroupnsMode = "private" - CgroupnsModeHost CgroupnsMode = "host" -) - // IsPrivate indicates whether the container uses its own private cgroup namespace func (c CgroupnsMode) IsPrivate() bool { - return c == CgroupnsModePrivate + return c == "private" } // IsHost indicates whether the container shares the host's cgroup namespace func (c CgroupnsMode) IsHost() bool { - return c == CgroupnsModeHost + return c == "host" } // IsEmpty indicates whether the container cgroup namespace mode is unset func (c CgroupnsMode) IsEmpty() bool { - return c == CgroupnsModeEmpty + return c == "" } // Valid indicates whether the cgroup namespace mode is valid @@ -44,69 +37,60 @@ func (c CgroupnsMode) Valid() bool { // values are platform specific type Isolation string -// Isolation modes for containers -const ( - IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) - IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon - IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode - IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode -) - // IsDefault indicates the default isolation technology of a container. On Linux this // is the native driver. On Windows, this is a Windows Server Container. func (i Isolation) IsDefault() bool { - // TODO consider making isolation-mode strict (case-sensitive) - v := Isolation(strings.ToLower(string(i))) - return v == IsolationDefault || v == IsolationEmpty + return strings.ToLower(string(i)) == "default" || string(i) == "" } // IsHyperV indicates the use of a Hyper-V partition for isolation func (i Isolation) IsHyperV() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationHyperV + return strings.ToLower(string(i)) == "hyperv" } // IsProcess indicates the use of process isolation func (i Isolation) IsProcess() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationProcess + return strings.ToLower(string(i)) == "process" } -// IpcMode represents the container ipc stack. -type IpcMode string - -// IpcMode constants const ( - IPCModeNone IpcMode = "none" - IPCModeHost IpcMode = "host" - IPCModeContainer IpcMode = "container" - IPCModePrivate IpcMode = "private" - IPCModeShareable IpcMode = "shareable" + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") ) +// IpcMode represents the container ipc stack. 
+type IpcMode string + // IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { - return n == IPCModePrivate + return n == "private" } // IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { - return n == IPCModeHost + return n == "host" } // IsShareable indicates whether the container's ipc namespace can be shared with another container. func (n IpcMode) IsShareable() bool { - return n == IPCModeShareable + return n == "shareable" } // IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { - return strings.HasPrefix(string(n), string(IPCModeContainer)+":") + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" } // IsNone indicates whether container IpcMode is set to "none". func (n IpcMode) IsNone() bool { - return n == IPCModeNone + return n == "none" } // IsEmpty indicates whether container IpcMode is empty @@ -121,8 +105,9 @@ func (n IpcMode) Valid() bool { // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { - if n.IsContainer() { - return strings.TrimPrefix(string(n), string(IPCModeContainer)+":") + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == "container" { + return parts[1] } return "" } @@ -341,7 +326,7 @@ type LogMode string // Available logging modes const ( - LogModeUnset LogMode = "" + LogModeUnset = "" LogModeBlocking LogMode = "blocking" LogModeNonBlock LogMode = "non-blocking" ) diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 9fe07e26fd2..aa8fba81548 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,26 +1,33 @@ package events // import "github.com/docker/docker/api/types/events" -// Type is used for event-types. -type Type = string - -// List of known event types. const ( - BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. - ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. - ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. - DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. - ImageEventType Type = "image" // ImageEventType is the event type that images generate. - NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. - NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. - PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. - SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. - ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. - VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. 
+ // BuilderEventType is the event type that the builder generates + BuilderEventType = "builder" + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that daemon generate + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = "secret" + // ConfigEventType is the event type that configs generate + ConfigEventType = "config" ) // Actor describes something that generates events, // like a container, or a network, or a volume. -// It has a defined name and a set of attributes. +// It has a defined name and a set or attributes. // The container attributes are its labels, other actors // can generate these attributes from other properties. type Actor struct { @@ -32,11 +39,11 @@ type Actor struct { type Message struct { // Deprecated information from JSONMessage. // With data only in container events. - Status string `json:"status,omitempty"` // Deprecated: use Action instead. - ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. - From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` - Type Type + Type string Action string Actor Actor // Engine events are local scope. Cluster events are swarm scope. diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index a186550d724..e3a159912e2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -212,12 +212,7 @@ type Info struct { SecurityOptions []string ProductLicense string `json:",omitempty"` DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string + Warnings []string } // KeyValue holds a key/value pair @@ -535,27 +530,6 @@ type ShimConfig struct { Opts interface{} } -// DiskUsageObject represents an object type used for disk usage query filtering. -type DiskUsageObject string - -const ( - // ContainerObject represents a container DiskUsageObject. - ContainerObject DiskUsageObject = "container" - // ImageObject represents an image DiskUsageObject. - ImageObject DiskUsageObject = "image" - // VolumeObject represents a volume DiskUsageObject. - VolumeObject DiskUsageObject = "volume" - // BuildCacheObject represents a build-cache DiskUsageObject. - BuildCacheObject DiskUsageObject = "build-cache" -) - -// DiskUsageOptions holds parameters for system disk usage query. -type DiskUsageOptions struct { - // Types specifies what object types to include in the response. 
If empty, - // all object types are returned. - Types []DiskUsageObject -} - // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { @@ -564,7 +538,7 @@ type DiskUsage struct { Containers []*Container Volumes []*Volume BuildCache []*BuildCache - BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. + BuilderSize int64 // deprecated } // ContainersPruneReport contains the response for Engine API: diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 489e917ee52..8ccb0aa92eb 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -8,9 +8,6 @@ import ( // compare compares two version strings // returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. func compare(v1, v2 string) int { - if v1 == v2 { - return 0 - } var ( currTab = strings.Split(v1, ".") otherTab = strings.Split(v2, ".") diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go index b76bf366bb9..3aae43e3d17 100644 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -5,7 +5,7 @@ import ( "net/url" ) -// BuildCancel requests the daemon to cancel the ongoing build request. +// BuildCancel requests the daemon to cancel ongoing build request func (cli *Client) BuildCancel(ctx context.Context, id string) error { query := url.Values{} query.Set("id", id) diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index fa511897e0e..21edf1fa1fc 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -281,6 +281,21 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } +// CustomHTTPHeaders returns the custom http headers stored by the client. +func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. +func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} + // Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. // Used by `docker dial-stdio` (docker/cli#889). func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index f6b1881fc36..ee7d411df06 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// ConfigCreate creates a new config. +// ConfigCreate creates a new Config. 
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { var response types.ConfigCreateResponse if err := cli.NewVersionError("1.30", "config create"); err != nil { diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index f1b0d7f7536..7d0ce3e11c0 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types/swarm" ) @@ -23,7 +23,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) } - body, err := io.ReadAll(resp.body) + body, err := ioutil.ReadAll(resp.body) if err != nil { return swarm.Config{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index 93de0d8445b..a708fcaecfd 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -2,7 +2,7 @@ package client // import "github.com/docker/docker/client" import "context" -// ConfigRemove removes a config. +// ConfigRemove removes a Config. func (cli *Client) ConfigRemove(ctx context.Context, id string) error { if err := cli.NewVersionError("1.30", "config remove"); err != nil { return err diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index ba79ae64e59..39e59cf8589 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// ConfigUpdate attempts to update a config +// ConfigUpdate attempts to update a Config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { if err := cli.NewVersionError("1.30", "config update"); err != nil { return err diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index cd7f7634646..2966e88c8ec 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ContainerCommit applies changes to a container and creates a new tagged image. +// ContainerCommit applies changes into a container and creates a new tagged image. func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index c0a47c14e31..bb278bf7f32 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -14,7 +14,7 @@ import ( "github.com/docker/docker/api/types" ) -// ContainerStatPath returns stat information about a path inside the container filesystem. +// ContainerStatPath returns Stat information about a path inside the container filesystem. 
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 47d15c2bbd6..c5079ee539e 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -18,7 +18,7 @@ type configWrapper struct { NetworkingConfig *network.NetworkingConfig } -// ContainerCreate creates a new container based on the given configuration. +// ContainerCreate creates a new container based in the given configuration. // It can be associated with a name, but it's not mandatory. func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { var response container.ContainerCreateCreatedBody diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index 43db32bd973..c496bcffea5 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "net/url" "github.com/docker/docker/api/types" @@ -41,7 +41,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) } - body, err := io.ReadAll(serverResp.body) + body, err := ioutil.ReadAll(serverResp.body) if err != nil { return types.ContainerJSON{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index aa0d6485de3..41e421969f4 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -9,7 +9,7 @@ import ( ) // ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for +// It makes the daemon to wait for the container to be up again for // a specific amount of time, given the timeout. func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index bf68a5300e9..6917cf9fb36 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" ) -// ContainerUpdate updates the resources of a container. 
+// ContainerUpdate updates resources of a container func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index ba0d92e9e66..354cd36939a 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -4,30 +4,23 @@ import ( "context" "encoding/json" "fmt" - "net/url" "github.com/docker/docker/api/types" ) // DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { - var query url.Values - if len(options.Types) > 0 { - query = url.Values{} - for _, t := range options.Types { - query.Add("type", string(t)) - } - } +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage - serverResp, err := cli.get(ctx, "/system/df", query, nil) + serverResp, err := cli.get(ctx, "/system/df", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.DiskUsage{}, err + return du, err } - var du types.DiskUsage if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) + return du, fmt.Errorf("Error retrieving disk usage: %v", err) } + return du, nil } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 7f36c99a016..f4e3794cb4c 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -8,7 +8,7 @@ import ( registrytypes "github.com/docker/docker/api/types/registry" ) -// DistributionInspect returns the image digest with the full manifest. +// DistributionInspect returns the image digest with full Manifest func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { // Contact the registry to retrieve digest and platform information var distributionInspect registrytypes.DistributionInspect diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index d16e1d8ea98..8fcf995036f 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -14,8 +14,8 @@ import ( "github.com/docker/docker/api/types/container" ) -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to +// ImageBuild sends request to the daemon to build images. +// The Body in the response implement an io.ReadCloser and it's up to the caller to // close it. 
func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(options) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index b1c0227775c..239380474e6 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ImageCreate creates a new image based on the parent options. +// ImageCreate creates a new image based in the parent options. // It returns the JSON content in the response body. func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(parentReference) diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index c5de42cb799..d3336d4106a 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ImageImport creates a new image based on the source options. +// ImageImport creates a new image based in the source options. // It returns the JSON content in the response body. func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { if ref != "" { diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 03aa12d8b4c..1eb8dce0259 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types" ) @@ -20,7 +20,7 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) } - body, err := io.ReadAll(serverResp.body) + body, err := ioutil.ReadAll(serverResp.body) if err != nil { return types.ImageInspect{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 5f40a22a964..82955a74775 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/errdefs" ) -// ImageSearch makes the docker host search by a term in a remote registry. +// ImageSearch makes the docker host to search by a term in a remote registry. // The list of results is not sorted in any fashion. 
func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { var results []registry.SearchResult diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 7277d1bbdd2..aabad4a9110 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -168,7 +168,7 @@ type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) Info(ctx context.Context) (types.Info, error) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) Ping(ctx context.Context) (types.Ping, error) } diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index ecf20ceb6e4..89a05b3021a 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "net/url" "github.com/docker/docker/api/types" @@ -39,7 +39,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, return networkResource, nil, wrapResponseError(err, resp, "network", networkID) } - body, err := io.ReadAll(resp.body) + body, err := ioutil.ReadAll(resp.body) if err != nil { return networkResource, nil, err } diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index b58db528567..d296c9fdde3 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types/swarm" ) @@ -20,7 +20,7 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) } - body, err := io.ReadAll(serverResp.body) + body, err := ioutil.ReadAll(serverResp.body) if err != nil { return swarm.Node{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 4a90bec51a0..81b89732b03 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types" ) @@ -20,7 +20,7 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type return nil, nil, wrapResponseError(err, resp, "plugin", name) } - body, err := io.ReadAll(resp.body) + body, err := ioutil.ReadAll(resp.body) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 475ce4bd404..813eac2c9e0 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net" "net/http" "net/url" @@ -109,16 +110,11 @@ func (cli *Client) 
sendRequest(ctx context.Context, method, path string, query u if err != nil { return serverResponse{}, err } - resp, err := cli.doRequest(ctx, req) - switch { - case errors.Is(err, context.Canceled): - return serverResponse{}, errdefs.Cancelled(err) - case errors.Is(err, context.DeadlineExceeded): - return serverResponse{}, errdefs.Deadline(err) - case err == nil: - err = cli.checkResponseErr(resp) + if err != nil { + return resp, errdefs.FromStatusCode(err, resp.statusCode) } + err = cli.checkResponseErr(resp) return resp, errdefs.FromStatusCode(err, resp.statusCode) } @@ -205,7 +201,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { R: serverResp.body, N: int64(bodyMax), } - body, err = io.ReadAll(bodyR) + body, err = ioutil.ReadAll(bodyR) if err != nil { return err } @@ -246,8 +242,10 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request req.Header.Set(k, v) } - for k, v := range headers { - req.Header[k] = v + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } } return req } @@ -265,7 +263,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { func ensureReaderClosed(response serverResponse) { if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(io.Discard, response.body, 512) + io.CopyN(ioutil.Discard, response.body, 512) response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index c65d38a191f..fd5b914136c 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// SecretCreate creates a new secret. +// SecretCreate creates a new Secret. func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse if err := cli.NewVersionError("1.25", "secret create"); err != nil { diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index c07c9550d44..d093916c9a4 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types/swarm" ) @@ -23,7 +23,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) } - body, err := io.ReadAll(resp.body) + body, err := ioutil.ReadAll(resp.body) if err != nil { return swarm.Secret{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index f6c69e57f85..c16f5558041 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -2,7 +2,7 @@ package client // import "github.com/docker/docker/client" import "context" -// SecretRemove removes a secret. +// SecretRemove removes a Secret. 
func (cli *Client) SecretRemove(ctx context.Context, id string) error { if err := cli.NewVersionError("1.25", "secret remove"); err != nil { return err diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index d082dcef75c..164256bbc15 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// SecretUpdate attempts to update a secret. +// SecretUpdate attempts to update a Secret func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { if err := cli.NewVersionError("1.25", "secret update"); err != nil { return err diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index a07315f71fe..e0428bf98b3 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -13,7 +13,7 @@ import ( "github.com/pkg/errors" ) -// ServiceCreate creates a new service. +// ServiceCreate creates a new Service. func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { var response types.ServiceCreateResponse headers := map[string][]string{ diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index c5368bab1e3..2801483b80f 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io" + "io/ioutil" "net/url" "github.com/docker/docker/api/types" @@ -25,7 +25,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) } - body, err := io.ReadAll(serverResp.body) + body, err := ioutil.ReadAll(serverResp.body) if err != nil { return swarm.Service{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index 410fc526e26..44d40ba5ae8 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -4,12 +4,12 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types/swarm" ) -// TaskInspectWithRaw returns the task information and its raw representation. +// TaskInspectWithRaw returns the task information and its raw representation.. 
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { if taskID == "" { return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} @@ -20,7 +20,7 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) } - body, err := io.ReadAll(serverResp.body) + body, err := ioutil.ReadAll(serverResp.body) if err != nil { return swarm.Task{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index 5c5b3f905c5..e20b2c67c7a 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io" + "io/ioutil" "github.com/docker/docker/api/types" ) @@ -28,7 +28,7 @@ func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (t return volume, nil, wrapResponseError(err, resp, "volume", volumeID) } - body, err := io.ReadAll(resp.body) + body, err := ioutil.ReadAll(resp.body) if err != nil { return volume, nil, err } diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 73576f1c6e4..5afe486779d 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,78 +1,11 @@ package errdefs // import "github.com/docker/docker/errdefs" import ( - "fmt" "net/http" - containerderrors "github.com/containerd/containerd/errdefs" - "github.com/docker/distribution/registry/api/errcode" "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) -// GetHTTPErrorStatusCode retrieves status code from error message. -func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - - // Stop right there - // Are you sure you should be adding a new error class here? Do one of the existing ones work? - - // Note that the below functions are already checking the error causal chain for matches. 
- switch { - case IsNotFound(err): - statusCode = http.StatusNotFound - case IsInvalidParameter(err): - statusCode = http.StatusBadRequest - case IsConflict(err): - statusCode = http.StatusConflict - case IsUnauthorized(err): - statusCode = http.StatusUnauthorized - case IsUnavailable(err): - statusCode = http.StatusServiceUnavailable - case IsForbidden(err): - statusCode = http.StatusForbidden - case IsNotModified(err): - statusCode = http.StatusNotModified - case IsNotImplemented(err): - statusCode = http.StatusNotImplemented - case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): - statusCode = http.StatusInternalServerError - default: - statusCode = statusCodeFromGRPCError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromContainerdError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromDistributionError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - if e, ok := err.(causer); ok { - return GetHTTPErrorStatusCode(e.Cause()) - } - - logrus.WithFields(logrus.Fields{ - "module": "api", - "error_type": fmt.Sprintf("%T", err), - }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - return statusCode -} - // FromStatusCode creates an errdef error, based on the provided HTTP status-code func FromStatusCode(err error, statusCode int) error { if err == nil { @@ -118,74 +51,3 @@ func FromStatusCode(err error, statusCode int) error { } return err } - -// statusCodeFromGRPCError returns status code according to gRPC error -func statusCodeFromGRPCError(err error) int { - switch status.Code(err) { - case codes.InvalidArgument: // code 3 - return http.StatusBadRequest - case codes.NotFound: // code 5 - return http.StatusNotFound - case codes.AlreadyExists: // code 6 - return http.StatusConflict - case codes.PermissionDenied: // code 7 - return http.StatusForbidden - case codes.FailedPrecondition: // code 9 - return http.StatusBadRequest - case codes.Unauthenticated: // code 16 - return http.StatusUnauthorized - case codes.OutOfRange: // code 11 - return http.StatusBadRequest - case codes.Unimplemented: // code 12 - return http.StatusNotImplemented - case codes.Unavailable: // code 14 - return http.StatusServiceUnavailable - default: - // codes.Canceled(1) - // codes.Unknown(2) - // codes.DeadlineExceeded(4) - // codes.ResourceExhausted(8) - // codes.Aborted(10) - // codes.Internal(13) - // codes.DataLoss(15) - return http.StatusInternalServerError - } -} - -// statusCodeFromDistributionError returns status code according to registry errcode -// code is loosely based on errcode.ServeJSON() in docker/distribution -func statusCodeFromDistributionError(err error) int { - switch errs := err.(type) { - case errcode.Errors: - if len(errs) < 1 { - return http.StatusInternalServerError - } - if _, ok := errs[0].(errcode.ErrorCoder); ok { - return statusCodeFromDistributionError(errs[0]) - } - case errcode.ErrorCoder: - return errs.ErrorCode().Descriptor().HTTPStatusCode - } - return http.StatusInternalServerError -} - -// statusCodeFromContainerdError returns status code for containerd errors when -// consumed directly (not through gRPC) -func statusCodeFromContainerdError(err error) int { - switch { - case containerderrors.IsInvalidArgument(err): - return http.StatusBadRequest - case 
containerderrors.IsNotFound(err): - return http.StatusNotFound - case containerderrors.IsAlreadyExists(err): - return http.StatusConflict - case containerderrors.IsFailedPrecondition(err): - return http.StatusPreconditionFailed - case containerderrors.IsUnavailable(err): - return http.StatusServiceUnavailable - case containerderrors.IsNotImplemented(err): - return http.StatusNotImplemented - default: - return http.StatusInternalServerError - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 00000000000..7307d9694f6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 00000000000..50b83c62c63 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1322 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.Identity + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMapping *idtools.IdentityMapping +} + +// NewDefaultArchiver returns a new Archiver without any IdentityMapping +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. 
+ Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + var noPigz bool + + if noPigzEnv != "" { + var err error + noPigz, err = strconv.ParseBool(noPigzEnv) + if err != nil { + logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") + } + } + + if noPigz { + logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + return gzip.NewReader(buf) + } + + unpigzPath, err := exec.LookPath("unpigz") + if err != nil { + logrus.Debugf("unpigz binary not found, falling back to go gzip library") + return gzip.NewReader(buf) + } + + logrus.Debugf("Using %s to decompress", unpigzPath) + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
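As a rough usage sketch based on the signature above (the archive names, the entry path, and its contents are hypothetical), ReplaceFileTarWrapper can swap one entry in a tar stream while copying everything else through:

	package main

	import (
		"archive/tar"
		"io"
		"os"

		"github.com/docker/docker/pkg/archive"
	)

	func main() {
		in, err := os.Open("app.tar") // hypothetical input archive
		if err != nil {
			panic(err)
		}
		out := archive.ReplaceFileTarWrapper(in, map[string]archive.TarModifierFunc{
			"etc/app.conf": func(path string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
				if hdr == nil { // entry absent from the archive: synthesize it
					hdr = &tar.Header{Name: path, Mode: 0644, Typeflag: tar.TypeReg}
				}
				// The wrapper fixes up header.Size from the returned bytes.
				return hdr, []byte("mode=production\n"), nil
			},
		})
		defer out.Close()
		dst, err := os.Create("app-modified.tar")
		if err != nil {
			panic(err)
		}
		defer dst.Close()
		io.Copy(dst, out)
	}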
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
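A small sketch of the intended call pattern for FileInfoHeader (the path is illustrative): stat the file first, then let the helper fill in the PAX format, truncated times, and the legacy type bits:

	fi, err := os.Lstat("etc/hosts")
	if err != nil {
		return err
	}
	hdr, err := archive.FileInfoHeader("etc/hosts", fi, "")
	if err != nil {
		return err
	}
	// hdr.Format is tar.FormatPAX; hdr.Mode carries modeISREG/modeISDIR-style bits.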
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Name = canonicalTarName(name, fi.IsDir()) + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + const ( + // Values based on linux/include/uapi/linux/capability.h + xattrCapsSz2 = 20 + versionOffset = 3 + vfsCapRevision2 = 2 + vfsCapRevision3 = 3 + ) + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + length := len(capability) + if capability[versionOffset] == vfsCapRevision3 { + // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no + // sense outside the user namespace the archive is built in. + capability[versionOffset] = vfsCapRevision2 + length = xattrCapsSz2 + } + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability[:length]) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping *idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +// path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) string { + name = CanonicalTarNameForPath(name) + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + // check whether the file is overlayfs whiteout + // if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP || err == syscall.EPERM { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. 
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = whiteoutConverter + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. 
+ replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMapping.RootPair() + whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + if err != nil { + return err + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // ignore XGlobalHeader early to avoid creating parent directories for them + if hdr.Typeflag == tar.TypeXGlobalHeader { + logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) + continue + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMapping, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
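A plausible round trip with the default archiver (both paths are illustrative); CopyWithTar pipes Tar straight into Untar with no temporary file:

	a := archive.NewDefaultArchiver()
	// Copies /srv/site into /var/backup/site, creating the destination
	// (owned per the archiver's identity mapping) if it does not exist.
	if err := a.CopyWithTar("/srv/site", "/var/backup/site"); err != nil {
		return err
	}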
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
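cmdStream itself is unexported, but the pattern it implements (pipe a command's stdout to the returned reader, surface stderr through the pipe's error) is worth seeing in miniature. A standalone sketch under the assumption that gzip is on PATH; streamCmd is a hypothetical name:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"os"
		"os/exec"
	)

	// streamCmd mirrors the cmdStream idea: the command's stdout becomes the
	// returned reader, and a failure closes the pipe with stderr attached.
	func streamCmd(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
		cmd.Stdin = input
		pr, pw := io.Pipe()
		cmd.Stdout = pw
		var errBuf bytes.Buffer
		cmd.Stderr = &errBuf
		if err := cmd.Start(); err != nil {
			return nil, err
		}
		go func() {
			if err := cmd.Wait(); err != nil {
				pw.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
			} else {
				pw.Close()
			}
		}()
		return pr, nil
	}

	func main() {
		r, err := streamCmd(exec.Command("gzip", "-d", "-c"), os.Stdin)
		if err != nil {
			panic(err)
		}
		defer r.Close()
		io.Copy(os.Stdout, r)
	}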
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Ensure the command has exited before we clean anything up + done := make(chan struct{}) + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(done) + }() + + return ioutils.NewReadCloserWrapper(pipeR, func() error { + // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as + // cmd.Wait waits for any non-file stdout/stderr/stdin to close. + err := pipeR.Close() + <-done + return err + }), nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
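A hedged sketch of the read-once contract (src and dst are assumed io.Reader/io.Writer values): once the final byte has been read, Read itself closes and removes the backing temp file:

	tmp, err := archive.NewTempArchive(src, "") // "" selects the default temp dir
	if err != nil {
		return err
	}
	// After the last byte is consumed, the temp file is deleted automatically;
	// calling Close again afterwards is a safe no-op.
	if _, err := io.Copy(dst, tmp); err != nil {
		return err
	}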
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 00000000000..0a3cc1f92bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,100 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { + if format == OverlayWhiteoutFormat { + if inUserNS { + return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns") + } + return overlayWhiteoutConverter{}, nil + } + return nil, nil +} + +type overlayWhiteoutConverter struct { +} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + 
return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 00000000000..28ae2769c5a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 00000000000..1eb0b74c355 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,116 @@ +//go:build !windows +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/containerd/sys" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return p // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) + if errors.Is(err, syscall.EPERM) && sys.RunningInUserNS() { + // In most cases, cannot create a device if running in user namespace + err = nil + } + return err +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 00000000000..7260174bfbb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,67 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return filepath.ToSlash(p) +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
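+// On Windows the OS-reported permission bits are not meaningful to tar
+// consumers, so this forces an executable, world-readable mode. A worked
+// illustration (not part of the upstream comment):
+//
+//	chmodTarEntry(0644) // == 0755
+//	chmodTarEntry(0444) // == 0555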
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+	permPart := perm & os.ModePerm
+	noPermPart := perm &^ os.ModePerm
+	// Add the x bit: make everything +x from windows
+	permPart |= 0111
+	permPart &= 0755
+
+	return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+	// do nothing. no notion of Rdev, Nlink in stat on Windows
+	return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+	// do nothing. no notion of Inode in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+	// no notion of file ownership mapping yet on Windows
+	return idtools.Identity{UID: 0, GID: 0}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 00000000000..aedb91b0352
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,445 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change, it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, delete.
+// This is used for layer diff.
+type Change struct {
+	Path string
+	Kind ChangeType
+}
+
+func (change *Change) String() string {
+	return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int           { return len(c) }
+func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
+
+// GNU tar doesn't have sub-second mtime precision. The go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// Non-sub-second is problematic when we apply changes via tar
+// files.
We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
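+		// Illustration (not upstream text): deleting /foo/bar/file.txt
+		// records a ChangeModify for /foo/bar alongside the ChangeDelete
+		// for file.txt, even if the directory metadata itself is unchanged.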
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. 
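+	// Illustration (not upstream text): if /a/b changed but /a did not, a
+	// ChangeModify for /a is spliced in *before* the entries recorded for
+	// its children, so consumers can restore /a's permissions first.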
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+				timestamp := time.Now()
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 00000000000..f8792b3d4e5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,286 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
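+	// For example (illustrative): names1 = [a, c] and names2 = [b, c]
+	// merge to [a, b, c]; "c" is omitted entirely when its inode matches
+	// in both trees on the same device, pruning that subtree from the diff.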
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names
+			continue
+		}
+		names = append(names, nameIno{name, dirent.Ino})
+	}
+	return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+	for i := 0; i < len(n); i++ {
+		if n[i] == 0 {
+			return i
+		}
+	}
+	return len(n)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 00000000000..0e4399a43b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,98 @@
+//go:build !linux
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+	var (
+		oldRoot, newRoot *FileInfo
+		err1, err2       error
+		errs             = make(chan error, 2)
+	)
+	go func() {
+		oldRoot, err1 = collectFileInfo(oldDir)
+		errs <- err1
+	}()
+	go func() {
+		newRoot, err2 = collectFileInfo(newDir)
+		errs <- err2
+	}()
+
+	// block until both routines have returned
+	for i := 0; i < 2; i++ {
+		if err := <-errs; err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+	root := newRootFileInfo()
+
+	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(sourceDir, path)
+		if err != nil {
+			return err
+		}
+
+		// As this runs on the daemon side, file paths are OS specific.
+		relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+		// Temporary workaround. If the returned path starts with two backslashes,
+		// trim it down to a single backslash. Only relevant on Windows.
+		if runtime.GOOS == "windows" {
+			if strings.HasPrefix(relPath, `\\`) {
+				relPath = relPath[1:]
+			}
+		}
+
+		if relPath == string(os.PathSeparator) {
+			return nil
+		}
+
+		parent := root.LookUp(filepath.Dir(relPath))
+		if parent == nil {
+			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+		}
+
+		info := &FileInfo{
+			name:     filepath.Base(relPath),
+			children: make(map[string]*FileInfo),
+			parent:   parent,
+		}
+
+		s, err := system.Lstat(path)
+		if err != nil {
+			return err
+		}
+		info.stat = s
+
+		info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+		parent.children[info.name] = info
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 00000000000..54aace970ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,44 @@
+//go:build !windows
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size or modification time for dirs, it's not a good
+		// measure of change.
See https://github.com/moby/moby/issues/9874 + // for a description of the issue with modification time, and + // https://github.com/moby/moby/pull/11422 for the change. + // (Note that in the Windows implementation of this function, + // modification time IS taken as a change). See + // https://github.com/moby/moby/pull/37982 for more information. + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 00000000000..9906685e4b0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,34 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Note there is slight difference between the Linux and Windows + // implementations here. Due to https://github.com/moby/moby/issues/9874, + // and the fix at https://github.com/moby/moby/pull/11422, Linux does not + // consider a change to the directory time as a change. Windows on NTFS + // does. See https://github.com/moby/moby/pull/37982 for more information. + + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 00000000000..57fddac078d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,480 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. 
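+//
+// Worked examples (illustrative, assuming sep == '/'):
+//
+//	PreserveTrailingDotOrSeparator("/foo", "/foo/", '/')  // "/foo/"
+//	PreserveTrailingDotOrSeparator("/foo", "/foo/.", '/') // "/foo/."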
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. 
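+	// e.g. (illustrative): "/var/log/app" splits into sourceDir "/var/log"
+	// and sourceBase "app"; entries are then written under rebaseName
+	// instead of "app" whenever rebaseName is non-empty.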
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	}
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symbolic link;
+	// we will use the target file instead of the symbolic link if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := SplitPathDirEntry(path)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		path = linkTarget
+		stat, err = os.Lstat(path)
+	}
+
+	if err != nil {
+		// It's okay if the destination path doesn't exist.
We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path, os.PathSeparator): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			// srcContent tar stream, as served by TarWithOptions(), is
+			// definitely in PAX format, but tar.Next() mistakenly guesses it
+			// as USTAR, which creates a problem: if the newBase is >100
+			// characters long, WriteHeader() returns an error like
+			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+			//
+			// To fix, set the format to PAX here. See docker/for-linux issue #484.
+			hdr.Format = tar.FormatPAX
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+			if hdr.Typeflag == tar.TypeLink {
+				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+			}
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
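+	// (Illustrative use, not upstream text: a caller typically pairs this
+	// with TarResource, e.g. content, err := TarResource(srcInfo) followed
+	// by CopyTo(content, srcInfo, "/dst/dir").)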
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, based on parameters
+// such as whether to follow symbolic links. If followLink is true, resolvedPath
+// is the link target of any symbolic link file; otherwise only symlinks in the
+// parent directory are resolved, and a symbolic link file itself is returned
+// without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following the symbolic link, resolve the symbolic link of the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path, os.PathSeparator) &&
+			filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) &&
+		!specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path, os.PathSeparator) &&
+		!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
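+		// e.g. (illustrative): path "/tmp/link/" resolving to "/tmp/target"
+		// yields rebaseName "link", so archive entries keep the name the
+		// caller asked for rather than the symlink target's name.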
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 00000000000..2ac7729f4cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,12 @@ +//go:build !windows +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 00000000000..a878d1bac42 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 00000000000..27897e6ab73 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,260 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. 
Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
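+			// e.g. (illustration): an existing symlink at path is removed
+			// before a regular file from the layer is written, while an
+			// existing directory is kept when the layer entry is also a dir.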
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMapping, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
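+//
+// Minimal illustrative use (not from upstream docs):
+//
+//	size, err := ApplyUncompressedLayer("/var/lib/foo", layerReader, &TarOptions{})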
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + if runtime.GOOS != "windows" { + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) + } + + if decompress { + decompLayer, err := DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompLayer.Close() + layer = decompLayer + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 00000000000..797143ee84d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 00000000000..d0877968617 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,17 @@ +//go:build !linux +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 00000000000..4c072a87ee5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 00000000000..85435694cff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./emptyfile with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 77152a6678e..34f1c726fb5 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -9,29 +9,9 @@ import ( "regexp" "strings" "text/scanner" - "unicode/utf8" -) - -// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. -var escapeBytes [8]byte - -// shouldEscape reports whether a rune should be escaped as part of the regex. -// -// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. -// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator -// on Windows. -// -// Adapted from regexp::QuoteMeta in go stdlib. -// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 -func shouldEscape(b rune) bool { - return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 -} -func init() { - for _, b := range []byte(`.+()|{}$`) { - escapeBytes[b%8] |= 1 << (b / 8) - } -} + "github.com/sirupsen/logrus" +) // PatternMatcher allows checking paths against a list of patterns type PatternMatcher struct { @@ -77,16 +57,8 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { return pm, nil } -// Matches returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// The "file" argument should be a slash-delimited path. -// -// Matches is not safe to call concurrently.
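A short usage sketch for the Generate helper above, exercising the odd-pair behavior its doc comment describes (import path as vendored in this repo):

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two path/content pairs plus one trailing path: foo.txt gets content,
	// emptyfile is created empty.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, rdr)
	fmt.Printf("generated a %d-byte tar stream\n", n)
}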
-// -// Deprecated: This implementation is buggy (it only checks a single parent dir -// against the pattern) and will be removed soon. Use either -// MatchesOrParentMatches or MatchesUsingParentResults instead. +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently func (pm *PatternMatcher) Matches(file string) (bool, error) { matched := false file = filepath.FromSlash(file) @@ -94,11 +66,10 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) { parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue + negative := false + + if pattern.exclusion { + negative = true } match, err := pattern.match(file) @@ -114,165 +85,17 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) { } if match { - matched = !pattern.exclusion + matched = !negative } } - return matched, nil -} - -// MatchesOrParentMatches returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// The "file" argument should be a slash-delimited path. -// -// Matches is not safe to call concurrently. -func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - - if match { - matched = !pattern.exclusion - } + if matched { + logrus.Debugf("Skipping excluded path: %s", file) } return matched, nil } -// MatchesUsingParentResult returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. The functionality is -// the same as Matches, but as an optimization, the caller keeps track of -// whether the parent directory matched. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResult is not safe to call concurrently. -// -// Deprecated: this function does behave correctly in some cases (see -// https://github.com/docker/buildx/issues/850). -// -// Use MatchesUsingParentResults instead. -func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { - matched := parentMatched - file = filepath.FromSlash(file) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. 
- if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if match { - matched = !pattern.exclusion - } - } - return matched, nil -} - -// MatchInfo tracks information about parent dir matches while traversing a -// filesystem. -type MatchInfo struct { - parentMatched []bool -} - -// MatchesUsingParentResults returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. The functionality is -// the same as Matches, but as an optimization, the caller passes in -// intermediate results from matching the parent directory. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResults is not safe to call concurrently. -func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { - parentMatched := parentMatchInfo.parentMatched - if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { - return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") - } - - file = filepath.FromSlash(file) - matched := false - - matchInfo := MatchInfo{ - parentMatched: make([]bool, len(pm.patterns)), - } - for i, pattern := range pm.patterns { - match := false - // If the parent matched this pattern, we don't need to recheck. - if len(parentMatched) != 0 { - match = parentMatched[i] - } - - if !match { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - var err error - match, err = pattern.match(file) - if err != nil { - return false, matchInfo, err - } - - // If the zero value of MatchInfo was passed in, we don't have - // any information about the parent dir's match results, and we - // apply the same logic as MatchesOrParentMatches. - if !match && len(parentMatched) == 0 { - if parentPath := filepath.Dir(file); parentPath != "." { - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - // Check to see if the pattern matches one of our parent dirs. - for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - } - } - matchInfo.parentMatched[i] = match - - if match { - matched = !pattern.exclusion - } - } - return matched, matchInfo, nil -} - // Exclusions returns true if any of the patterns define exclusions func (pm *PatternMatcher) Exclusions() bool { return pm.exclusions @@ -301,6 +124,7 @@ func (p *Pattern) Exclusion() bool { } func (p *Pattern) match(path string) (bool, error) { + if p.regexp == nil { if err := p.compile(); err != nil { return false, filepath.ErrBadPattern @@ -355,7 +179,7 @@ func (p *Pattern) compile() error { } else if ch == '?' { // "?" is any char except "/" regStr += "[^" + escSL + "]" - } else if shouldEscape(ch) { + } else if ch == '.' || ch == '$' { // Escape some regexp special chars that have no meaning // in golang's filepath.Match regStr += `\` + string(ch) @@ -392,9 +216,6 @@ func (p *Pattern) compile() error { // Matches returns true if file matches any of the patterns // and isn't excluded by any of the subsequent patterns. -// -// This implementation is buggy (it only checks a single parent dir against the -// pattern) and will be removed soon. Use MatchesOrParentMatches instead. 
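Taken together, the matcher above gives .dockerignore-style semantics: patterns are evaluated in order, and a later `!` exclusion can re-include a previously matched path. A short sketch, assuming the package behaves as vendored here:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"docs/**", "!docs/README.md"}
	for _, f := range []string{"docs/api/spec.yaml", "docs/README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-20s excluded=%v\n", f, excluded)
	}
}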
func Matches(file string, patterns []string) (bool, error) { pm, err := NewPatternMatcher(patterns) if err != nil { @@ -410,23 +231,6 @@ func Matches(file string, patterns []string) (bool, error) { return pm.Matches(file) } -// MatchesOrParentMatches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func MatchesOrParentMatches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.MatchesOrParentMatches(file) -} - // CopyFile copies from src to dst until either EOF is reached // on src or an error occurs. It verifies src exists and removes // the dst if it exists. diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go index f782b4266aa..af0c26b614d 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -5,6 +5,7 @@ package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "fmt" + "io/ioutil" "os" "github.com/sirupsen/logrus" @@ -13,7 +14,7 @@ import ( // GetTotalUsedFds Returns the number of used File Descriptors by // reading it via /proc filesystem. func GetTotalUsedFds() int { - if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 00000000000..25a57b231e0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,241 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" +) + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership and permissions. +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership and permissions. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. 
+func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership or permissions will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { + UID int + GID int + SID string +} + +// IdentityMapping contains the mappings of UIDs and GIDs +type IdentityMapping struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { + return &IdentityMapping{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return Identity{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IdentityMapping) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) UIDs() []IDMap { + return i.uids +} + +// GIDs return the GID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username.
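The translation helpers above are plain interval arithmetic over the mapping entries. A standalone sketch with one assumed range (container IDs 0-65535 mapped to host IDs starting at 100000):

package main

import "fmt"

// idMap mirrors one IDMap entry from the package above.
type idMap struct{ containerID, hostID, size int }

// toHost translates a container ID to its host ID if it falls in the range.
func toHost(contID int, m idMap) (int, bool) {
	if contID >= m.containerID && contID <= m.containerID+m.size-1 {
		return m.hostID + (contID - m.containerID), true
	}
	return -1, false
}

func main() {
	m := idMap{containerID: 0, hostID: 100000, size: 65536}
	for _, id := range []int{0, 1000, 65535, 70000} {
		host, ok := toHost(id, m)
		fmt.Printf("container %-6d -> host %-7d (mapped=%v)\n", id, host, ok)
	}
}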
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + + return rangeList, s.Err() +} + +// CurrentIdentity returns the identity of the current process +func CurrentIdentity() Identity { + return Identity{UID: os.Getuid(), GID: os.Getegid()} +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000000..ceec0339b56 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,296 @@ +//go:build !windows +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
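For reference, parseSubidFile above consumes colon-separated entries like `builder:100000:65536`. A minimal sketch of the per-line parse, simplified to omit comment handling and username filtering:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLine splits a subuid/subgid entry of the form "name:start:length".
func parseLine(line string) (start, length int, err error) {
	parts := strings.Split(strings.TrimSpace(line), ":")
	if len(parts) != 3 {
		return 0, 0, fmt.Errorf("malformed entry: %q", line)
	}
	if start, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}
	if length, err = strconv.Atoi(parts[2]); err != nil {
		return 0, 0, err
	}
	return start, length, nil
}

func main() {
	start, length, err := parseLine("builder:100000:65536")
	fmt.Println(start, length, err) // 100000 65536 <nil>
}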
If chownExisting is false, we won't + // chown the full directory path if it exists + + var paths []string + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil + } + + // short-circuit--we were called with an existing directory and chown was requested + return setPermissions(path, mode, owner.UID, owner.GID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair Identity) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(name string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(name) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(name) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(strconv.Itoa(uid)) +} + +func getentUser(name string) (user.User, error) { + reader, err := callGetent("passwd", name) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from 
libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(name string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(name) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(name) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(strconv.Itoa(gid)) +} + +func getentGroup(name string) (user.Group, error) { + reader, err := callGetent("group", name) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) + } + return groups[0], nil +} + +func callGetent(database, key string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("unable to find getent command") + } + out, err := execCmd(getentCmd, database, key) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. 
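CanAccess above boils down to three execute-bit tests, and notably the other-execute bit grants access regardless of ownership. A standalone sketch of that check:

package main

import (
	"fmt"
	"os"
)

// accessible mirrors the permission-bit tests used by CanAccess above.
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
	if isOwner && perms&0100 != 0 {
		return true
	}
	if isGroup && perms&0010 != 0 {
		return true
	}
	return perms&0001 != 0
}

func main() {
	fmt.Println(accessible(true, false, 0o750))  // true: owner with owner-x
	fmt.Println(accessible(false, true, 0o750))  // true: group with group-x
	fmt.Println(accessible(false, false, 0o750)) // false: no other-x
	fmt.Println(accessible(false, false, 0o751)) // true: other-x set
}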
+func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if os.FileMode(stat.Mode()).Perm() != mode.Perm() { + if err := os.Chmod(p, mode.Perm()); err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} + +// NewIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIdentityMapping(name string) (*IdentityMapping, error) { + usr, err := LookupUser(name) + if err != nil { + return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) + } + + subuidRanges, err := lookupSubUIDRanges(usr) + if err != nil { + return nil, err + } + subgidRanges, err := lookupSubGIDRanges(usr) + if err != nil { + return nil, err + } + + return &IdentityMapping{ + uids: subuidRanges, + gids: subgidRanges, + }, nil +} + +func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubuid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} + +func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubgid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000000..35ede0fffa7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be supported here +// too.
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, identity Identity) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000000..bf7ae0564ba --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) +) + +const ( + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(name string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + var args []string + switch userCommand { + case "adduser": + args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} + case "useradd": + args = []string{"-r", "-s", "/bin/false", name} + default: + return fmt.Errorf("cannot add user; no useradd/adduser binary found") + } + + if out, err := execCmd(userCommand, args...); err != 
nil { + return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000000..5e24577e2c2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,13 @@ +//go:build !linux +// +build !linux + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
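The range allocation above walks the sorted existing ranges and bumps the candidate start past any collision. A simplified standalone sketch; the vendored wouldOverlap uses slightly different boundary comparisons:

package main

import (
	"fmt"
	"sort"
)

type subIDRange struct{ start, length int }

const rangeLen = 65536 // defaultRangeLen above

// nextStart returns the first start position where a rangeLen-sized block
// does not intersect any existing range.
func nextStart(existing []subIDRange, startID int) int {
	sort.Slice(existing, func(i, j int) bool { return existing[i].start < existing[j].start })
	for _, r := range existing {
		if startID < r.start+r.length && startID+rangeLen > r.start {
			startID = r.start + r.length
		}
	}
	return startID
}

func main() {
	taken := []subIDRange{{start: 100000, length: 65536}, {start: 165536, length: 65536}}
	fmt.Println(nextStart(taken, 100000)) // 231072: first free block after both
}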
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 00000000000..540672af5af --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +//go:build !windows +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "path/filepath" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + // only return no error if the final resolved binary basename + // matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd string, arg ...string) ([]byte, error) { + execCmd := exec.Command(cmd, arg...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index d1dfdae0cc2..87514b643d7 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -50,12 +50,12 @@ func NewBytesPipe() *BytesPipe { // It can allocate new []byte slices in a process of writing. func (bp *BytesPipe) Write(p []byte) (int, error) { bp.mu.Lock() - defer bp.mu.Unlock() written := 0 loop0: for { if bp.closeErr != nil { + bp.mu.Unlock() return written, ErrClosed } @@ -72,6 +72,7 @@ loop0: // errBufferFull is an error we expect to get if the buffer is full if err != nil && err != errBufferFull { bp.wait.Broadcast() + bp.mu.Unlock() return written, err } @@ -99,6 +100,7 @@ loop0: bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() + bp.mu.Unlock() return written, nil } @@ -124,14 +126,17 @@ func (bp *BytesPipe) Close() error { // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() - defer bp.mu.Unlock() if bp.bufLen == 0 { if bp.closeErr != nil { - return 0, bp.closeErr + err := bp.closeErr + bp.mu.Unlock() + return 0, err } bp.wait.Wait() if bp.bufLen == 0 && bp.closeErr != nil { - return 0, bp.closeErr + err := bp.closeErr + bp.mu.Unlock() + return 0, err } } @@ -156,6 +161,7 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { } bp.wait.Broadcast() + bp.mu.Unlock() return } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go index 82671d8cd55..534d66ac268 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -2,6 +2,7 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io" + "io/ioutil" "os" "path/filepath" ) @@ -10,7 +11,7 @@ import ( // temporary file and closing it atomically changes the temporary file to // destination path. Writing and closing concurrently is not allowed. 
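A usage sketch for the atomic writer above (the file name is illustrative): writes land in a hidden temp file next to the destination, and a clean Close renames it into place, so readers never observe a partial file.

package main

import (
	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	w, err := ioutils.NewAtomicFileWriter("settings.json", 0o644)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte(`{"updated": true}`)); err != nil {
		w.Close() // discards the temp file; the destination is untouched
		panic(err)
	}
	// Close syncs and renames the temp file over settings.json.
	if err := w.Close(); err != nil {
		panic(err)
	}
}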
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) if err != nil { return nil, err } @@ -93,7 +94,7 @@ type AtomicWriteSet struct { // commit. If no temporary directory is given the system // default is used. func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := os.MkdirTemp(tmpDir, "write-set-") + td, err := ioutil.TempDir(tmpDir, "write-set-") if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index de00b95e3f6..1f657bd3dca 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -2,12 +2,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" + "crypto/sha256" + "encoding/hex" "io" - - // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered - // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. - _ "crypto/sha256" - _ "crypto/sha512" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -52,6 +49,15 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + // OnEOFReader wraps an io.ReadCloser and a function // the function will run at the end of file or close the file. type OnEOFReader struct { diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go index 74891223098..4e67ec2f53f 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -3,9 +3,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "os" +import "io/ioutil" -// TempDir on Unix systems is equivalent to os.MkdirTemp. +// TempDir on Unix systems is equivalent to ioutil.TempDir. func TempDir(dir, prefix string) (string, error) { - return os.MkdirTemp(dir, prefix) + return ioutil.TempDir(dir, prefix) } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go index a57fd9af6a8..ecaba2e36d2 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -1,14 +1,14 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( - "os" + "io/ioutil" "github.com/docker/docker/pkg/longpath" ) -// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. 
func TempDir(dir, prefix string) (string, error) { - tempDir, err := os.MkdirTemp(dir, prefix) + tempDir, err := ioutil.TempDir(dir, prefix) if err != nil { return "", err } diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 00000000000..3792c67a9e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools // import "github.com/docker/docker/pkg/pools" + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { s := make([]byte, size); return &s }, + }, + } +} + +func (bp *bufferPool) Get() *[]byte { + return bp.pool.Get().(*[]byte) +} + +func (bp *bufferPool) Put(b *[]byte) { + bp.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, *buf) + buffer32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. 
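The point of the pools package above is to reuse large buffers on hot copy paths instead of allocating 32K per call. A short usage sketch of the pooled Copy:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	var dst bytes.Buffer
	// Copy drives io.CopyBuffer with a pooled 32K buffer rather than
	// letting io.Copy allocate a fresh one.
	n, err := pools.Copy(&dst, strings.NewReader("hello from a pooled copy"))
	if err != nil {
		panic(err)
	}
	fmt.Println(n, dst.String())
}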
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.WriteCloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 00000000000..8f6e0a737aa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,190 @@ +package stdcopy // import "github.com/docker/docker/pkg/stdcopy" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error stream type. + Stderr + // Systemerr represents errors originating from the system that make it + // into the multiplexed stream. + Systemerr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is a wrapper of io.Writer with extra customized info. +type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underlying writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter implement io.Writer. +func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy.
+// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + stream := StdType(buf[stdWriterFdIndex]) + // Check the first byte to know where to write + switch stream { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. 
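The frame format handled above (and the Systemerr branch that follows) is an 8-byte header, one stream byte, three zero bytes, and a big-endian length, followed by the payload. A round-trip sketch multiplexing two streams into one buffer and splitting them back out:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var mux bytes.Buffer
	stdcopy.NewStdWriter(&mux, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&mux, stdcopy.Stderr).Write([]byte("to stderr\n"))

	var out, errs bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errs, &mux); err != nil {
		panic(err)
	}
	fmt.Print("stdout: ", out.String())
	fmt.Print("stderr: ", errs.String())
}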
+ if stream == Systemerr { + return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go new file mode 100644 index 00000000000..b7c9487a067 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/args_windows.go @@ -0,0 +1,16 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings" + + "golang.org/x/sys/windows" +) + +// EscapeArgs makes a Windows-style escaped command line from a set of arguments +func EscapeArgs(args []string) string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = windows.EscapeArg(a) + } + return strings.Join(escapedArgs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 00000000000..c26a4e24b66 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + return setCTime(name, mtime) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go new file mode 100644 index 00000000000..84ae1570513 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go @@ -0,0 +1,15 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" +) + +// setCTime will set the create time on a file. On Unix, the create +// time is updated as a side effect of setting the modified time, so +// no action is required. 
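The clamping in Chtimes above protects os.Chtimes from undefined behavior on out-of-range values. A standalone sketch of the same range check (the 64-bit maximum is an assumption matching what the package uses on modern platforms):

package main

import (
	"fmt"
	"time"
)

// clamp mirrors the range check in Chtimes above: timestamps before the
// Unix epoch or after the platform maximum fall back to the epoch.
func clamp(t, max time.Time) time.Time {
	epoch := time.Unix(0, 0)
	if t.Before(epoch) || t.After(max) {
		return epoch
	}
	return t
}

func main() {
	max := time.Unix(0, 1<<63-1) // assumed 64-bit nanosecond limit
	fmt.Println(clamp(time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), max)) // epoch
	fmt.Println(clamp(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), max)) // unchanged
}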
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 00000000000..6664b8bcad7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,26 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" + + "golang.org/x/sys/windows" +) + +// setCTime will set the create time on a file. On Windows, this requires +// calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 00000000000..2573d716222 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") +) diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 00000000000..4ba8fe35bfd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError; otherwise it returns 0 and an error. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go new file mode 100644 index 00000000000..186d9d9a11d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -0,0 +1,68 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all directories it creates.
+func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 00000000000..b4646277abf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,292 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. It can be used +// as a drop-in replacement for os.MkdirAll() +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false, "") +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. 
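+// +// Illustrative note (not vendored code): a volume path such as +// \\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963} (a made-up GUID) matches +// the check below and returns nil immediately, so no directory is created +// for it.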
+func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false, sddl) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if applyACL { + err = mkdirWithACL(path, sddl) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. +// +// This is a modified and combined version of os.Mkdir and windows.Mkdir +// in golang to cater for creating a directory with an ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. +func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := windows.SecurityDescriptorFromString(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = sd + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) { + return true + } + return false +} + +// The origin of the functions below here is the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particularly for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists.
If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 00000000000..a17597aaba2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 00000000000..a91288c60b1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,29 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + + "github.com/sirupsen/logrus" +) + +var ( + // containerdRuntimeSupported determines if ContainerD should be the runtime. + // As of March 2019, this is an experimental feature. + containerdRuntimeSupported = false +) + +// InitContainerdRuntime sets whether to use ContainerD for runtime +// on Windows. This is an experimental feature still in development, and +// also requires an environment variable to be set (so as not to turn the +// feature on simply by enabling experimental mode, which would also enable LCOW).
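+// +// Illustrative note (not vendored code): all three gates below must hold. A +// hypothetical caller would set DOCKER_WINDOWS_CONTAINERD_RUNTIME=1 in the +// environment and invoke InitContainerdRuntime(true, "C:\\containerd.exe") +// (the path is made up) before ContainerdRuntimeSupported() starts reporting true.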
+func InitContainerdRuntime(experimental bool, cdPath string) { + if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { + logrus.Warnf("Using ContainerD runtime. This feature is experimental") + containerdRuntimeSupported = true + } +} + +// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. +func ContainerdRuntimeSupported() bool { + return containerdRuntimeSupported +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 00000000000..4599a3f23c3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,49 @@ +//go:build windows && !no_lcow +// +build windows,!no_lcow + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings" + + "github.com/Microsoft/hcsshim/osversion" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + // lcowSupported determines if Linux Containers on Windows are supported. + lcowSupported = false +) + +// InitLCOW sets whether LCOW is supported or not. Requires RS5+ +func InitLCOW(experimental bool) { + if experimental && osversion.Build() >= osversion.RS5 { + lcowSupported = true + } +} + +func LCOWSupported() bool { + return lcowSupported +} + +// ValidatePlatform determines if a platform structure is valid. +// TODO This is a temporary windows-only function, should be replaced by +// comparison of worker capabilities +func ValidatePlatform(platform specs.Platform) error { + if !IsOSSupported(platform.OS) { + return errors.Errorf("unsupported os %s", platform.OS) + } + return nil +} + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if strings.EqualFold("windows", os) { + return true + } + if LCOWSupported() && strings.EqualFold(os, "linux") { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go new file mode 100644 index 00000000000..daadef31d54 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go @@ -0,0 +1,29 @@ +//go:build !windows || (windows && no_lcow) +// +build !windows windows,no_lcow + +package system // import "github.com/docker/docker/pkg/system" +import ( + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(_ bool) {} + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} + +// ValidatePlatform determines if a platform structure is valid. This function +// is used for LCOW, and is a no-op on non-windows platforms. +func ValidatePlatform(_ specs.Platform) error { + return nil +} + +// IsOSSupported determines if an operating system is supported by the host. 
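+// +// Illustrative note (not vendored code): on this non-LCOW build the check +// reduces to a case-insensitive comparison with runtime.GOOS, so on a Linux +// host IsOSSupported("linux") is true while IsOSSupported("windows") is false.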
+func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go new file mode 100644 index 00000000000..654b9f2c9e6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 00000000000..359c791d9b6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 00000000000..6667eb84dcb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system // import "github.com/docker/docker/pkg/system" + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 00000000000..cd060eff24d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,71 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + units "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + memAvailable := int64(-1) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. 
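+ // Illustrative note (not vendored code): a line "MemTotal: 16384516 kB" + // scans to parts = ["MemTotal:", "16384516", "kB"], so bytes below becomes + // 16384516 * units.KiB = 16777744384.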
+ size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "MemAvailable:": + memAvailable = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + if memAvailable != -1 { + meminfo.MemFree = memAvailable + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000000..207ee58ee6c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,9 @@ +//go:build !linux && !windows +// +build !linux,!windows + +package system // import "github.com/docker/docker/pkg/system" + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 00000000000..6ed93f2fe26 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 00000000000..d27152c0f5b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. 
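+// +// Illustrative note (not vendored code): with the layout described above, +// major 8 and minor 1 (the traditional numbers for /dev/sda1) encode as +// (1 & 0xff) | (8 << 8) = 0x801, since the minor's top bits are zero here.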
+func Mkdev(major int64, minor int64) uint32 { + return uint32(unix.Mkdev(uint32(major), uint32(minor))) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go new file mode 100644 index 00000000000..c890be116f7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -0,0 +1,14 @@ +//go:build freebsd +// +build freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, uint64(dev)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go new file mode 100644 index 00000000000..4586aad19e6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -0,0 +1,14 @@ +//go:build !freebsd && !windows +// +build !freebsd,!windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 00000000000..ec89d7a15ea --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 00000000000..64e892289a6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,64 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" +) + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is a unix-style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character. +func DefaultPathEnv(os string) string { + if runtime.GOOS == "windows" { + if os != runtime.GOOS { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} + +// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter +// actually uses in order to avoid system depending on containerd/continuity. +type PathVerifier interface { + IsAbs(string) bool +} + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op.
+// On Windows: this verifies and manipulates the path as follows. +// It is used, for example, when validating a user-provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates the path to OS semantics (in other words, / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + if runtime.GOOS != "windows" || LCOWSupported() { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 00000000000..2c85371b5e8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,11 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 00000000000..22a56136c8f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,27 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/windows" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go new file mode 100644 index 00000000000..145689b88af --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -0,0 +1,45 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "io/ioutil" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running.
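+// +// Clarifying note (not vendored code): signal 0 performs all of kill(2)'s +// error checking without actually delivering a signal, so both a nil error +// and EPERM (the process exists but is not ours to signal) mean it is alive.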
+func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} + +// IsProcessZombie returns true if the process has state "Z" +// http://man7.org/linux/man-pages/man5/proc.5.html +func IsProcessZombie(pid int) (bool, error) { + statPath := fmt.Sprintf("/proc/%d/stat", pid) + dataBytes, err := ioutil.ReadFile(statPath) + if err != nil { + return false, err + } + data := string(dataBytes) + sdata := strings.SplitN(data, " ", 4) + if len(sdata) >= 3 && sdata[2] == "Z" { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 00000000000..09bdfa0ca0a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + _ = p.Kill() + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go new file mode 100644 index 00000000000..f2d81597c9d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/rm.go @@ -0,0 +1,79 @@ +//go:build !darwin && !windows +// +build !darwin,!windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" + "time" + + "github.com/moby/sys/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone; we can just retry the remove operation. +// +// This should not return an `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 50 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return nil + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error.
+ if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if e := mount.Unmount(pe.Path); e != nil { + return errors.Wrapf(e, "error while removing %s", dir) + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/rm_windows.go b/vendor/github.com/docker/docker/pkg/system/rm_windows.go new file mode 100644 index 00000000000..ed9c5dcb8ae --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/rm_windows.go @@ -0,0 +1,6 @@ +package system + +import "os" + +// EnsureRemoveAll is an alias to os.RemoveAll on Windows +var EnsureRemoveAll = os.RemoveAll diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go new file mode 100644 index 00000000000..8e61d820f02 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go @@ -0,0 +1,16 @@ +//go:build freebsd || netbsd +// +build freebsd netbsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 00000000000..c1c0ee9f386 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 00000000000..17d5d131a3c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,20 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + // the type is 32bit on mips + rdev: uint64(s.Rdev), // nolint: unconvert + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 00000000000..756b92d1e6c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 00000000000..6a51ccd6423 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go new file mode 100644 index 00000000000..a45ffddf750 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -0,0 +1,67 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 00000000000..b2456cb8870 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. 
+type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 00000000000..7c90bffaa59 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,12 @@ +//go:build linux || freebsd +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 00000000000..1588aa3ef9f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,136 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "unsafe" + + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +const ( + OWNER_SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.OWNER_SECURITY_INFORMATION + GROUP_SECURITY_INFORMATION = windows.GROUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.GROUP_SECURITY_INFORMATION + DACL_SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.DACL_SECURITY_INFORMATION + SACL_SECURITY_INFORMATION = windows.SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SACL_SECURITY_INFORMATION + LABEL_SECURITY_INFORMATION = windows.LABEL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.LABEL_SECURITY_INFORMATION + ATTRIBUTE_SECURITY_INFORMATION = windows.ATTRIBUTE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.ATTRIBUTE_SECURITY_INFORMATION + SCOPE_SECURITY_INFORMATION = windows.SCOPE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SCOPE_SECURITY_INFORMATION + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = windows.BACKUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.BACKUP_SECURITY_INFORMATION + PROTECTED_DACL_SECURITY_INFORMATION = windows.PROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_DACL_SECURITY_INFORMATION + PROTECTED_SACL_SECURITY_INFORMATION = windows.PROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_SACL_SECURITY_INFORMATION + 
UNPROTECTED_DACL_SECURITY_INFORMATION = windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_DACL_SECURITY_INFORMATION + UNPROTECTED_SACL_SECURITY_INFORMATION = windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_SACL_SECURITY_INFORMATION +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = windows.SE_UNKNOWN_OBJECT_TYPE // Deprecated: use golang.org/x/sys/windows.SE_UNKNOWN_OBJECT_TYPE + SE_FILE_OBJECT = windows.SE_FILE_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_FILE_OBJECT + SE_SERVICE = windows.SE_SERVICE // Deprecated: use golang.org/x/sys/windows.SE_SERVICE + SE_PRINTER = windows.SE_PRINTER // Deprecated: use golang.org/x/sys/windows.SE_PRINTER + SE_REGISTRY_KEY = windows.SE_REGISTRY_KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_KEY + SE_LMSHARE = windows.SE_LMSHARE // Deprecated: use golang.org/x/sys/windows.SE_LMSHARE + SE_KERNEL_OBJECT = windows.SE_KERNEL_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_KERNEL_OBJECT + SE_WINDOW_OBJECT = windows.SE_WINDOW_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WINDOW_OBJECT + SE_DS_OBJECT = windows.SE_DS_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT + SE_DS_OBJECT_ALL = windows.SE_DS_OBJECT_ALL // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT = windows.SE_PROVIDER_DEFINED_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT = windows.SE_WMIGUID_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY = windows.SE_REGISTRY_WOW64_32KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion = osversion.OSVersion + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported) +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// dockerd.exe must be manifested to get the correct version information. 
+// Deprecated: use github.com/Microsoft/hcsshim/osversion.Get() instead +func GetOSVersion() OSVersion { + return osversion.Get() +} + +// IsWindowsClient returns true if the SKU is client +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(_ string) error { + return nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} + +// Deprecated: use golang.org/x/sys/windows.SetNamedSecurityInfo() +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +// Deprecated: use golang.org/x/sys/windows.SecurityDescriptorFromString() and golang.org/x/sys/windows.SECURITY_DESCRIPTOR.DACL() +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = e1 + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 00000000000..d4a15cbedc3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,14 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 00000000000..fc62388c389 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,7 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go new file mode 100644 index 00000000000..2768750a00b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -0,0 +1,25 @@ +//go:build linux || freebsd +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment. +func LUtimesNano(path string, ts []syscall.Timespec) error { + uts := []unix.Timespec{ + unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), + unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), + } + err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) + if err != nil && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000000..bfed4af0325 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,11 @@ +//go:build !linux && !freebsd +// +build !linux,!freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 00000000000..95b609fe7a8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,37 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will return a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128-byte buffer + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENODATA: + return nil, nil + case errno != nil: + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system.
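+// +// Illustrative note (not vendored code): a hypothetical call such as +// Lsetxattr("/tmp/f", "user.example", []byte("value"), 0) attaches the +// attribute to /tmp/f itself, without following a trailing symlink.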
+func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000000..b165a5dbfe9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,14 @@ +//go:build !linux +// +build !linux + +package system // import "github.com/docker/docker/pkg/system" + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go index 81533d269fb..2d0ecde2d47 100644 --- a/vendor/github.com/docker/docker/registry/auth.go +++ b/vendor/github.com/docker/docker/registry/auth.go @@ -87,19 +87,26 @@ func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent strin logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) - loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) + loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) if err != nil { return "", "", err } req, err := http.NewRequest(http.MethodGet, endpointStr, nil) if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } return "", "", err } resp, err := loginClient.Do(req) if err != nil { err = translateV2AuthError(err) + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err } defer resp.Body.Close() @@ -110,13 +117,19 @@ func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent strin // TODO(dmcgowan): Attempt to further interpret result, status code and error code string err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + if !foundV2 { + err = fallbackError{err: err} + } return "", "", err } -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { - challengeManager, err := PingV2Registry(endpoint, authTransport) +func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { + challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) if err != nil { - return nil, err + if !foundV2 { + err = fallbackError{err: err} + } + return nil, foundV2, err } tokenHandlerOptions := auth.TokenHandlerOptions{ @@ -134,7 +147,8 @@ func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifi return &http.Client{ Transport: tr, Timeout: 15 * time.Second, - }, nil + }, foundV2, nil + } // ConvertToHostname converts a registry url which has http|https prepended @@ -183,9 +197,18 @@ func (err PingResponseError) Error() string { } // PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types. -// If a response is received but cannot be interpreted, a PingResponseError will be returned. 
-func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, error) { +// challenge manager for the supported authentication types and +// whether v2 was confirmed by the response. If a response is received but +// cannot be interpreted a PingResponseError will be returned. +func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + pingClient := &http.Client{ Transport: transport, Timeout: 15 * time.Second, @@ -193,20 +216,32 @@ func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.M endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" req, err := http.NewRequest(http.MethodGet, endpointStr, nil) if err != nil { - return nil, err + return nil, false, err } resp, err := pingClient.Do(req) if err != nil { - return nil, err + return nil, false, err } defer resp.Body.Close() + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundV2 = true + break + } + } + challengeManager := challenge.NewSimpleManager() if err := challengeManager.AddResponse(resp); err != nil { - return nil, PingResponseError{ + return nil, foundV2, PingResponseError{ Err: err, } } - return challengeManager, nil + return challengeManager, foundV2, nil } diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index cfb87f0d0d9..54b83fa40aa 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -29,6 +29,10 @@ type serviceConfig struct { const ( // DefaultNamespace is the default namespace DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + // IndexHostname is the index hostname IndexHostname = "index.docker.io" // IndexServer is used for user auth and image search diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go index 684c330dc30..db342d1412b 100644 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io" + "io/ioutil" "net/http" "net/url" "strings" @@ -22,7 +22,6 @@ type V1Endpoint struct { } // NewV1Endpoint parses the given address to return a registry endpoint. -// TODO: remove. This is only used by search. 
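The foundV2 logic above keys off the Docker-Distribution-Api-Version header returned by the /v2/ ping. A rough standalone sketch of that check with plain net/http; the endpoint URL is illustrative, and the header parsing here is a simplified stand-in for auth.APIVersions:

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    // registryIsV2 reports whether a /v2/ ping response carries the
    // "registry/2.0" API version header, mirroring the foundV2 loop above.
    func registryIsV2(resp *http.Response) bool {
    	for _, h := range resp.Header.Values("Docker-Distribution-Api-Version") {
    		// A header line may list several space-separated versions.
    		for _, v := range strings.Fields(h) {
    			if v == "registry/2.0" {
    				return true
    			}
    		}
    	}
    	return false
    }

    func main() {
    	resp, err := http.Get("https://registry-1.docker.io/v2/")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	fmt.Println("v2 registry:", registryIsV2(resp))
    }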
func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { tlsConfig, err := newTLSConfig(index.Name, index.Secure) if err != nil { @@ -34,8 +33,7 @@ func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders return nil, err } - err = validateEndpoint(endpoint) - if err != nil { + if err := validateEndpoint(endpoint); err != nil { return nil, err } @@ -69,6 +67,20 @@ func validateEndpoint(endpoint *V1Endpoint) error { return nil } +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { + endpoint := &V1Endpoint{ + IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) + return endpoint +} + // trimV1Address trims the version off the address and returns the // trimmed address or an error if there is a non-V1 version. func trimV1Address(address string) (string, error) { @@ -77,7 +89,10 @@ func trimV1Address(address string) (string, error) { apiVersionStr string ) - address = strings.TrimSuffix(address, "/") + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + chunks = strings.Split(address, "/") apiVersionStr = chunks[len(chunks)-1] if apiVersionStr == "v1" { @@ -108,14 +123,9 @@ func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent strin return nil, err } - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) + endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) - return &V1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: uri, - client: HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)), - }, nil + return endpoint, nil } // Get the formatted URL for the root of this registry Endpoint @@ -131,28 +141,29 @@ func (e *V1Endpoint) Path(path string) string { // Ping returns a PingResult which indicates whether the registry is standalone or not. func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + if e.String() == IndexServer { // Skip the check, we know this one is valid // (and we never want to fallback to http in case of error) - return PingResult{}, nil + return PingResult{Standalone: false}, nil } - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) req, err := http.NewRequest(http.MethodGet, e.Path("_ping"), nil) if err != nil { - return PingResult{}, err + return PingResult{Standalone: false}, err } resp, err := e.client.Do(req) if err != nil { - return PingResult{}, err + return PingResult{Standalone: false}, err } defer resp.Body.Close() - jsonString, err := io.ReadAll(resp.Body) + jsonString, err := ioutil.ReadAll(resp.Body) if err != nil { - return PingResult{}, fmt.Errorf("error while reading the http response: %s", err) + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) } // If the header is absent, we assume true for compatibility with earlier @@ -165,12 +176,13 @@ func (e *V1Endpoint) Ping() (PingResult, error) { // don't stop here. 
Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) info.Version = hdr } logrus.Debugf("PingResult.Version: %q", info.Version) standalone := resp.Header.Get("X-Docker-Registry-Standalone") - + logrus.Debugf("Registry standalone header: '%s'", standalone) // Accepted values are "true" (case-insensitive) and "1". if strings.EqualFold(standalone, "true") || standalone == "1" { info.Standalone = true diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index 55fc117c827..7a70bf28b51 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -3,7 +3,9 @@ package registry // import "github.com/docker/docker/registry" import ( "crypto/tls" + "errors" "fmt" + "io/ioutil" "net" "net/http" "os" @@ -16,6 +18,12 @@ import ( "github.com/sirupsen/logrus" ) +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + // HostCertsDir returns the config directory for a specific host func HostCertsDir(hostname string) (string, error) { certsDir := CertsDir() @@ -46,7 +54,7 @@ func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { return tlsConfig, nil } -func hasFile(files []os.DirEntry, name string) bool { +func hasFile(files []os.FileInfo, name string) bool { for _, f := range files { if f.Name() == name { return true @@ -59,7 +67,7 @@ func hasFile(files []os.DirEntry, name string) bool { // including roots and certificate pairs and updates the // provided TLS configuration. func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := os.ReadDir(directory) + fs, err := ioutil.ReadDir(directory) if err != nil && !os.IsNotExist(err) { return err } @@ -74,7 +82,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { tlsConfig.RootCAs = systemPool } logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := os.ReadFile(filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) if err != nil { return err } diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index 276b04724ab..3b08e39da2c 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -135,11 +135,12 @@ func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, if err == nil { return } - if errdefs.IsUnauthorized(err) { - // Failed to authenticate; don't continue with (non-TLS) endpoints. 
- return status, token, err + if fErr, ok := err.(fallbackError); ok { + logrus.WithError(fErr.err).Infof("Error logging in to endpoint, trying next endpoint") + continue } - logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint") + + return "", "", err } return "", "", err @@ -193,14 +194,14 @@ func (s *DefaultService) Search(ctx context.Context, term string, limit int, aut } modifiers := Headers(userAgent, nil) - v2Client, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) if err != nil { if fErr, ok := err.(fallbackError); ok { logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) } else { return nil, err } - } else { + } else if foundV2 { // Copy non transport http client features v2Client.Timeout = endpoint.client.Timeout v2Client.CheckRedirect = endpoint.client.CheckRedirect @@ -246,12 +247,18 @@ type APIEndpoint struct { TLSConfig *tls.Config } +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +// Deprecated: this function is deprecated and will be removed in a future update +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + // TLSConfig constructs a client TLS configuration based on server defaults func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { s.mu.Lock() defer s.mu.Unlock() - return s.tlsConfig(hostname) + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) } // tlsConfig constructs a client TLS configuration based on server defaults @@ -259,6 +266,10 @@ func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) } +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + // LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. // It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go index 46f28ebccfd..3e3a5b41ffb 100644 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -18,7 +18,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp if err != nil { return nil, err } - mirrorTLSConfig, err := s.tlsConfig(mirrorURL.Host) + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go index 073e244ba8b..28ed2bfa5e8 100644 --- a/vendor/github.com/docker/docker/registry/types.go +++ b/vendor/github.com/docker/docker/registry/types.go @@ -45,8 +45,9 @@ func (av APIVersion) String() string { // API Version identifiers. 
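The const block just below swaps the explicit version numbers for iota. A tiny sketch confirming the resulting values are unchanged; the type is reproduced here only for the example:

    package main

    import "fmt"

    type APIVersion int

    const (
    	_           = iota // skip 0 so versions start at 1
    	APIVersion1 APIVersion = iota
    	APIVersion2 // repeats the previous expression, so == 2
    )

    func main() {
    	fmt.Println(APIVersion1, APIVersion2) // 1 2
    }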
const ( - APIVersion1 APIVersion = 1 - APIVersion2 APIVersion = 2 + _ = iota + APIVersion1 APIVersion = iota + APIVersion2 ) var apiVersions = map[APIVersion]string{ diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index bb7e4e33695..296c96a6334 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -8,11 +8,6 @@ import ( "strings" ) -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - // PortBinding represents a binding between a Host IP address and a Host Port type PortBinding struct { // HostIP is the host IP Address @@ -158,30 +153,33 @@ type PortMapping struct { func splitParts(rawport string) (string, string, string) { parts := strings.Split(rawport, ":") n := len(parts) - containerport := parts[n-1] + containerPort := parts[n-1] switch n { case 1: - return "", "", containerport + return "", "", containerPort case 2: - return "", parts[0], containerport + return "", parts[0], containerPort case 3: - return parts[0], parts[1], containerport + return parts[0], parts[1], containerPort default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort } } // ParsePortSpec parses a port specification string into a slice of PortMappings func ParsePortSpec(rawPort string) ([]PortMapping, error) { var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) + ip, hostPort, containerPort := splitParts(rawPort) proto, containerPort = SplitProtoPort(containerPort) - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + if ip != "" && ip[0] == '[' { + // Strip [] from IPV6 addresses + rawIP, _, err := net.SplitHostPort(ip + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + } + ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { return nil, fmt.Errorf("Invalid ip address: %s", ip) diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go index ce950171e31..b6eed145e1c 100644 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -43,7 +43,7 @@ type portMapSorter []portMapEntry func (s portMapSorter) Len() int { return len(s) } func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// sort the port so that the order is: +// Less sorts the port so that the order is: // 1. port with larger specified bindings // 2. larger port // 3. 
port with tcp protocol @@ -58,7 +58,7 @@ func (s portMapSorter) Less(i, j int) bool { func SortPortMap(ports []Port, bindings PortMap) { s := portMapSorter{} for _, p := range ports { - if binding, ok := bindings[p]; ok { + if binding, ok := bindings[p]; ok && len(binding) > 0 { for _, b := range binding { s = append(s, portMapEntry{port: p, binding: b}) } diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go index 98e9a1dc61b..c897cb02ade 100644 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -2,11 +2,8 @@ package sockets import ( "net" - "net/url" "os" "strings" - - "golang.org/x/net/proxy" ) // GetProxyEnv allows access to the uppercase and the lowercase forms of @@ -20,32 +17,12 @@ func GetProxyEnv(key string) string { return proxyValue } -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil +// DialerFromEnvironment was previously used to configure a net.Dialer to route +// connections through a SOCKS proxy. +// DEPRECATED: SOCKS proxies are now supported by configuring only +// http.Transport.Proxy, and no longer require changing http.Transport.Dial. +// Therefore, only sockets.ConfigureTransport() needs to be called, and any +// sockets.DialerFromEnvironment() calls can be dropped. +func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { + return direct, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index a1d7beb4d80..2e9e9006f61 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -3,14 +3,9 @@ package sockets import ( "errors" - "net" "net/http" - "time" ) -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. 
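Given the DialerFromEnvironment deprecation above, a client now only needs sockets.ConfigureTransport; proxying comes from the environment via http.Transport.Proxy. A minimal usage sketch, with the daemon socket path as an illustrative value and the import path following the vendoring shown here:

    package main

    import (
    	"net/http"

    	"github.com/docker/go-connections/sockets"
    )

    func main() {
    	tr := &http.Transport{}
    	// For "unix" (and "npipe" on Windows) this installs a DialContext;
    	// for tcp it now only sets tr.Proxy = http.ProxyFromEnvironment.
    	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
    		panic(err)
    	}
    	client := &http.Client{Transport: tr}
    	_ = client // ready to use against the unix socket
    }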
var ErrProtocolNotAvailable = errors.New("protocol not available") @@ -26,13 +21,6 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 386cf0dbbde..10d76342652 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,6 +3,7 @@ package sockets import ( + "context" "fmt" "net" "net/http" @@ -10,7 +11,10 @@ import ( "time" ) -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +const ( + defaultTimeout = 10 * time.Second + maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { @@ -18,8 +22,11 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { } // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) + dialer := &net.Dialer{ + Timeout: defaultTimeout, + } + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, proto, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 5c21644e1fe..7acafc5a2ad 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -1,6 +1,7 @@ package sockets import ( + "context" "net" "net/http" "time" @@ -15,8 +16,8 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { func configureNpipeTransport(tr *http.Transport, proto, addr string) error { // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index a8b5dbb6fdc..e7591e6edbf 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,51 @@ // +build !windows +/* +Package sockets is a simple unix domain socket wrapper. 
+
+Usage
+
+For example:
+
+	import(
+		"fmt"
+		"net"
+		"github.com/docker/go-connections/sockets"
+	)
+
+	func main() {
+		l, err := sockets.NewUnixSocketWithOpts("/path/to/sockets",
+			sockets.WithChown(0, 0), sockets.WithChmod(0660))
+		if err != nil {
+			panic(err)
+		}
+		echoStr := "hello"
+
+		go func() {
+			for {
+				conn, err := l.Accept()
+				if err != nil {
+					return
+				}
+				conn.Write([]byte(echoStr))
+				conn.Close()
+			}
+		}()
+
+		conn, err := net.Dial("unix", "/path/to/sockets")
+		if err != nil {
+			panic(err)
+		}
+		defer conn.Close()
+
+		buf := make([]byte, 5)
+		if _, err := conn.Read(buf); err != nil {
+			panic(err)
+		} else if string(buf) != echoStr {
+			panic(fmt.Errorf("message may be lost"))
+		}
+	}
+*/
package sockets

import (
@@ -8,25 +54,73 @@ import (
 	"syscall"
 )
 
-// NewUnixSocket creates a unix socket with the specified path and group.
-func NewUnixSocket(path string, gid int) (net.Listener, error) {
+// SockOption configures how the socket file is created.
+type SockOption func(string) error
+
+// WithChown modifies the socket file's uid and gid.
+func WithChown(uid, gid int) SockOption {
+	return func(path string) error {
+		if err := os.Chown(path, uid, gid); err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+// WithChmod modifies the socket file's access mode.
+func WithChmod(mask os.FileMode) SockOption {
+	return func(path string) error {
+		if err := os.Chmod(path, mask); err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+// NewUnixSocketWithOpts creates a unix socket with the specified options.
+// By default, socket permissions are 0000 (i.e.: no access for anyone); pass
+// WithChmod() and WithChown() to set the desired ownership and permissions.
+//
+// This function temporarily changes the system's "umask" to 0777 to work around
+// a race condition between creating the socket and setting its permissions. While
+// this should only be for a short duration, it may affect other processes that
+// create files/directories during that period.
+func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) {
 	if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
 		return nil, err
 	}
 
-	mask := syscall.Umask(0777)
-	defer syscall.Umask(mask)
+	// net.Listen does not allow for permissions to be set. As a result, when
+	// specifying custom permissions ("WithChmod()"), there is a short time
+	// between creating the socket and applying the permissions, during which
+	// the socket permissions are less restrictive than desired.
+	//
+	// To work around this limitation of net.Listen(), we temporarily set the
+	// umask to 0777, which forces the socket to be created with 000 permissions
+	// (i.e.: no access for anyone). After that, WithChmod() must be used to set
+	// the desired permissions.
+	//
+	// We don't use "defer" here, to reset the umask to its original value as soon
+	// as possible. Ideally we'd be able to detect if WithChmod() was passed as
+	// an option, and skip changing umask if default permissions are used.
+	origUmask := syscall.Umask(0777)
 	l, err := net.Listen("unix", path)
+	syscall.Umask(origUmask)
 	if err != nil {
 		return nil, err
 	}
-	if err := os.Chown(path, 0, gid); err != nil {
-		l.Close()
-		return nil, err
-	}
-	if err := os.Chmod(path, 0660); err != nil {
-		l.Close()
-		return nil, err
+
+	for _, op := range opts {
+		if err := op(path); err != nil {
+			_ = l.Close()
+			return nil, err
+		}
 	}
+
 	return l, nil
 }
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 0ef3fdcb469..992968373eb 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -53,18 +53,9 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, @@ -72,25 +63,25 @@ func ServerDefault(ops ...func(*tls.Config)) *tls.Config { } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
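The ops parameters of ServerDefault and ClientDefault above are plain functional options over *tls.Config, so any mutation can be passed in. A minimal sketch; the ServerName value is illustrative and the import path follows this vendoring:

    package main

    import (
    	"crypto/tls"
    	"fmt"

    	"github.com/docker/go-connections/tlsconfig"
    )

    func main() {
    	// Each option is applied to the default config in order.
    	cfg := tlsconfig.ClientDefault(func(c *tls.Config) {
    		c.ServerName = "registry.example.com"
    	})
    	fmt.Println(cfg.MinVersion == tls.VersionTLS12) // true
    }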
@@ -108,11 +99,11 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pem, err := ioutil.ReadFile(caFile) + pemData, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pem) { + if !certPool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } return certPool, nil @@ -141,7 +132,7 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key +// password when trying to decrypt a TLS private key func IsErrEncryptedKey(err error) bool { return errors.Cause(err) == x509.IncorrectPasswordError } @@ -157,8 +148,8 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { } var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go new file mode 100644 index 00000000000..d8215f8e78a --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go @@ -0,0 +1,16 @@ +// +build go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go new file mode 100644 index 00000000000..a5ba7f4a388 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go @@ -0,0 +1,15 @@ +// +build !go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/docker/libnetwork/LICENSE new file mode 100644 index 00000000000..e06d2081865 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/docker/libnetwork/resolvconf/README.md b/vendor/github.com/docker/libnetwork/resolvconf/README.md new file mode 100644 index 00000000000..cdda554ba57 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/README.md @@ -0,0 +1 @@ +Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go new file mode 100644 index 00000000000..e348bc57f56 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go @@ -0,0 +1,26 @@ +package dns + +import ( + "regexp" +) + +// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. +const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + +// IPv4Localhost is a regex pattern for IPv4 localhost address range. +const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})` + +var localhostIPRegexp = regexp.MustCompile(IPLocalhost) +var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression. +// Used for determining if nameserver settings are being passed which are +// localhost addresses +func IsLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} + +// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression. +func IsIPv4Localhost(ip string) bool { + return localhostIPv4Regexp.MatchString(ip) +} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go new file mode 100644 index 00000000000..946bb87123e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go @@ -0,0 +1,285 @@ +// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" + "sync" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + // defaultPath is the default path to the resolv.conf that contains information to resolve DNS. See Path(). + defaultPath = "/etc/resolv.conf" + // alternatePath is a path different from defaultPath, that may be used to resolve DNS. See Path(). + alternatePath = "/run/systemd/resolve/resolv.conf" +) + +var ( + detectSystemdResolvConfOnce sync.Once + pathAfterSystemdDetection = defaultPath +) + +// Path returns the path to the resolv.conf file that libnetwork should use. +// +// When /etc/resolv.conf contains 127.0.0.53 as the only nameserver, then +// it is assumed systemd-resolved manages DNS. Because inside the container 127.0.0.53 +// is not a valid DNS server, Path() returns /run/systemd/resolve/resolv.conf +// which is the resolv.conf that systemd-resolved generates and manages. +// Otherwise Path() returns /etc/resolv.conf. +// +// Errors are silenced as they will inevitably resurface at future open/read calls. 
+// +// More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf +func Path() string { + detectSystemdResolvConfOnce.Do(func() { + candidateResolvConf, err := ioutil.ReadFile(defaultPath) + if err != nil { + // silencing error as it will resurface at next calls trying to read defaultPath + return + } + ns := GetNameservers(candidateResolvConf, types.IP) + if len(ns) == 1 && ns[0] == "127.0.0.53" { + pathAfterSystemdDetection = alternatePath + logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) + } + }) + return pathAfterSystemdDetection +} + +var ( + // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS + defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} + defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} + ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` + ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock + // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also + // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants + // -- e.g. other link-local types -- either won't work in containers or are unnecessary. + // For readability and sufficiency for Docker purposes this seemed more reasonable than a + // 1000+ character regexp with exact and complete IPv6 validation + ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` + + localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) + nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) + nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) + optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) +) + +var lastModified struct { + sync.Mutex + sha256 string + contents []byte +} + +// File contains the resolv.conf content and its hash +type File struct { + Content []byte + Hash string +} + +// Get returns the contents of /etc/resolv.conf and its hash +func Get() (*File, error) { + return GetSpecific(Path()) +} + +// GetSpecific returns the contents of the user specified resolv.conf file and its hash +func GetSpecific(path string) (*File, error) { + resolv, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + hash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + return &File{Content: resolv, Hash: hash}, nil +} + +// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash +// and, if modified since last check, returns the bytes and new hash. 
+// This feature is used by the resolv.conf updater for containers +func GetIfChanged() (*File, error) { + lastModified.Lock() + defer lastModified.Unlock() + + resolv, err := ioutil.ReadFile(Path()) + if err != nil { + return nil, err + } + newHash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + if lastModified.sha256 != newHash { + lastModified.sha256 = newHash + lastModified.contents = resolv + return &File{Content: resolv, Hash: newHash}, nil + } + // nothing changed, so return no data + return nil, nil +} + +// GetLastModified retrieves the last used contents and hash of the host resolv.conf. +// Used by containers updating on restart +func GetLastModified() *File { + lastModified.Lock() + defer lastModified.Unlock() + + return &File{Content: lastModified.contents, Hash: lastModified.sha256} +} + +// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: +// 1. It looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2. Given the caller provides the enable/disable state of IPv6, the filter +// code will remove all IPv6 nameservers if it is not enabled for containers +// +func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { + cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) + // if IPv6 is not enabled, also clean out any IPv6 address nameserver + if !ipv6Enabled { + cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) + } + // if the resulting resolvConf has no more nameservers defined, add appropriate + // default DNS servers for IPv4 and (optionally) IPv6 + if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 { + logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) + dns := defaultIPv4Dns + if ipv6Enabled { + logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) + dns = append(dns, defaultIPv6Dns...) + } + cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) + } + hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf)) + if err != nil { + return nil, err + } + return &File{Content: cleanedResolvConf, Hash: hash}, nil +} + +// getLines parses input into lines and strips away comments. 
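A small sketch exercising this package's parsing helpers (GetNameservers above and the search/options helpers defined just below) against an in-memory resolv.conf; the input is illustrative and the import paths follow this vendoring:

    package main

    import (
    	"fmt"

    	"github.com/docker/libnetwork/resolvconf"
    	"github.com/docker/libnetwork/types"
    )

    func main() {
    	conf := []byte("# comment\nnameserver 127.0.0.53\nsearch a.example\nsearch b.example c.example\noptions ndots:2\n")

    	ns := resolvconf.GetNameservers(conf, types.IP)
    	fmt.Println(ns) // [127.0.0.53]

    	// A single 127.0.0.53 entry is exactly the pattern Path() uses to
    	// detect systemd-resolved and switch to the file under /run.
    	fmt.Println(len(ns) == 1 && ns[0] == "127.0.0.53") // true

    	// Only the last search/options line wins, per the doc comments below.
    	fmt.Println(resolvconf.GetSearchDomains(conf)) // [b.example c.example]
    	fmt.Println(resolvconf.GetOptions(conf))       // [ndots:2]
    }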
+func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte, kind int) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + var ns [][]byte + if kind == types.IP { + ns = nsRegexp.FindSubmatch(line) + } else if kind == types.IPv4 { + ns = nsIPv4Regexpmatch.FindSubmatch(line) + } else if kind == types.IPv6 { + ns = nsIPv6Regexpmatch.FindSubmatch(line) + } + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf, types.IP) { + var address string + // If IPv6, strip zone if present + if strings.Contains(nameserver, ":") { + address = strings.Split(nameserver, "%")[0] + "/128" + } else { + address = nameserver + "/32" + } + nameservers = append(nameservers, address) + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one is returned. +func GetSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +// GetOptions returns options (if any) listed in /etc/resolv.conf +// If more than one options line is encountered, only the contents of the last +// one is returned. +func GetOptions(resolvConf []byte) []string { + options := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := optionsRegexp.FindSubmatch(line) + if match == nil { + continue + } + options = strings.Fields(string(match[1])) + } + return options +} + +// Build writes a configuration file to path containing a "nameserver" entry +// for every element in dns, a "search" entry for every element in +// dnsSearch, and an "options" entry for every element in dnsOptions. +func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) { + content := bytes.NewBuffer(nil) + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." 
{ + if _, err := content.WriteString("search " + searchString + "\n"); err != nil { + return nil, err + } + } + } + for _, dns := range dns { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return nil, err + } + } + if len(dnsOptions) > 0 { + if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" { + if _, err := content.WriteString("options " + optsString + "\n"); err != nil { + return nil, err + } + } + } + + hash, err := ioutils.HashData(bytes.NewReader(content.Bytes())) + if err != nil { + return nil, err + } + + return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644) +} diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go new file mode 100644 index 00000000000..42da71be0ed --- /dev/null +++ b/vendor/github.com/docker/libnetwork/types/types.go @@ -0,0 +1,649 @@ +// Package types contains types that are common across libnetwork project +package types + +import ( + "bytes" + "fmt" + "net" + "strconv" + "strings" + + "github.com/ishidawataru/sctp" +) + +// constants for the IP address type +const ( + IP = iota // IPv4 and IPv6 + IPv4 + IPv6 +) + +// EncryptionKey is the libnetwork representation of the key distributed by the lead +// manager. +type EncryptionKey struct { + Subsystem string + Algorithm int32 + Key []byte + LamportTime uint64 +} + +// UUID represents a globally unique ID of various resources like network and endpoint +type UUID string + +// QosPolicy represents a quality of service policy on an endpoint +type QosPolicy struct { + MaxEgressBandwidth uint64 +} + +// TransportPort represents a local Layer 4 endpoint +type TransportPort struct { + Proto Protocol + Port uint16 +} + +// Equal checks if this instance of Transportport is equal to the passed one +func (t *TransportPort) Equal(o *TransportPort) bool { + if t == o { + return true + } + + if o == nil { + return false + } + + if t.Proto != o.Proto || t.Port != o.Port { + return false + } + + return true +} + +// GetCopy returns a copy of this TransportPort structure instance +func (t *TransportPort) GetCopy() TransportPort { + return TransportPort{Proto: t.Proto, Port: t.Port} +} + +// String returns the TransportPort structure in string form +func (t *TransportPort) String() string { + return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port) +} + +// FromString reads the TransportPort structure from string +func (t *TransportPort) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) == 2 { + t.Proto = ParseProtocol(ps[0]) + if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil { + t.Port = uint16(p) + return nil + } + } + return BadRequestErrorf("invalid format for transport port: %s", s) +} + +// PortBinding represents a port binding between the container and the host +type PortBinding struct { + Proto Protocol + IP net.IP + Port uint16 + HostIP net.IP + HostPort uint16 + HostPortEnd uint16 +} + +// HostAddr returns the host side transport address +func (p PortBinding) HostAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case TCP: + return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case SCTP: + return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// ContainerAddr returns the container side transport address +func (p 
PortBinding) ContainerAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil + case TCP: + return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil + case SCTP: + return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// GetCopy returns a copy of this PortBinding structure instance +func (p *PortBinding) GetCopy() PortBinding { + return PortBinding{ + Proto: p.Proto, + IP: GetIPCopy(p.IP), + Port: p.Port, + HostIP: GetIPCopy(p.HostIP), + HostPort: p.HostPort, + HostPortEnd: p.HostPortEnd, + } +} + +// String returns the PortBinding structure in string form +func (p *PortBinding) String() string { + ret := fmt.Sprintf("%s/", p.Proto) + if p.IP != nil { + ret += p.IP.String() + } + ret = fmt.Sprintf("%s:%d/", ret, p.Port) + if p.HostIP != nil { + ret += p.HostIP.String() + } + ret = fmt.Sprintf("%s:%d", ret, p.HostPort) + return ret +} + +// FromString reads the PortBinding structure from string s. +// String s is a triple of "protocol/containerIP:port/hostIP:port" +// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form. +// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported. +// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString +// returns an error. +func (p *PortBinding) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) != 3 { + return BadRequestErrorf("invalid format for port binding: %s", s) + } + + p.Proto = ParseProtocol(ps[0]) + + var err error + if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil { + return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error()) + } + + if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil { + return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error()) + } + + return nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + hoststr, portstr, err := net.SplitHostPort(s) + if err != nil { + return nil, 0, err + } + + ip := net.ParseIP(hoststr) + if ip == nil { + return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr) + } + + port, err := strconv.ParseUint(portstr, 10, 16) + if err != nil { + return nil, 0, BadRequestErrorf("invalid port: %s", portstr) + } + + return ip, uint16(port), nil +} + +// Equal checks if this instance of PortBinding is equal to the passed one +func (p *PortBinding) Equal(o *PortBinding) bool { + if p == o { + return true + } + + if o == nil { + return false + } + + if p.Proto != o.Proto || p.Port != o.Port || + p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { + return false + } + + if p.IP != nil { + if !p.IP.Equal(o.IP) { + return false + } + } else { + if o.IP != nil { + return false + } + } + + if p.HostIP != nil { + if !p.HostIP.Equal(o.HostIP) { + return false + } + } else { + if o.HostIP != nil { + return false + } + } + + return true +} + +// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. 
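A quick round-trip of the "protocol/containerIP:port/hostIP:port" triple that PortBinding.FromString parses above; addresses and ports are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/docker/libnetwork/types"
    )

    func main() {
    	var pb types.PortBinding
    	if err := pb.FromString("tcp/172.17.0.2:80/0.0.0.0:8080"); err != nil {
    		panic(err)
    	}
    	fmt.Println(pb.Proto, pb.Port, pb.HostPort) // tcp 80 8080
    	fmt.Println(pb.String())                    // tcp/172.17.0.2:80/0.0.0.0:8080
    }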
+type ErrInvalidProtocolBinding string + +func (ipb ErrInvalidProtocolBinding) Error() string { + return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) +} + +const ( + // ICMP is for the ICMP ip protocol + ICMP = 1 + // TCP is for the TCP ip protocol + TCP = 6 + // UDP is for the UDP ip protocol + UDP = 17 + // SCTP is for the SCTP ip protocol + SCTP = 132 +) + +// Protocol represents an IP protocol number +type Protocol uint8 + +func (p Protocol) String() string { + switch p { + case ICMP: + return "icmp" + case TCP: + return "tcp" + case UDP: + return "udp" + case SCTP: + return "sctp" + default: + return fmt.Sprintf("%d", p) + } +} + +// ParseProtocol returns the respective Protocol type for the passed string +func ParseProtocol(s string) Protocol { + switch strings.ToLower(s) { + case "icmp": + return ICMP + case "udp": + return UDP + case "tcp": + return TCP + case "sctp": + return SCTP + default: + return 0 + } +} + +// GetMacCopy returns a copy of the passed MAC address +func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { + if from == nil { + return nil + } + to := make(net.HardwareAddr, len(from)) + copy(to, from) + return to +} + +// GetIPCopy returns a copy of the passed IP address +func GetIPCopy(from net.IP) net.IP { + if from == nil { + return nil + } + to := make(net.IP, len(from)) + copy(to, from) + return to +} + +// GetIPNetCopy returns a copy of the passed IP Network +func GetIPNetCopy(from *net.IPNet) *net.IPNet { + if from == nil { + return nil + } + bm := make(net.IPMask, len(from.Mask)) + copy(bm, from.Mask) + return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} +} + +// GetIPNetCanonical returns the canonical form for the passed network +func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + c := GetIPNetCopy(nw) + c.IP = c.IP.Mask(nw.Mask) + return c +} + +// CompareIPNet returns equal if the two IP Networks are equal +func CompareIPNet(a, b *net.IPNet) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) +} + +// GetMinimalIP returns the address in its shortest form +// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. +// Otherwise ip is returned unchanged. +func GetMinimalIP(ip net.IP) net.IP { + if ip != nil && ip.To4() != nil { + return ip.To4() + } + return ip +} + +// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation +func GetMinimalIPNet(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + if len(nw.IP) == 16 && nw.IP.To4() != nil { + m := nw.Mask + if len(m) == 16 { + m = m[12:16] + } + return &net.IPNet{IP: nw.IP.To4(), Mask: m} + } + return nw +} + +// IsIPNetValid returns true if the ipnet is a valid network/mask +// combination. Otherwise returns false. +func IsIPNetValid(nw *net.IPNet) bool { + return nw.String() != "0.0.0.0/0" +} + +var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +// compareIPMask checks if the passed ip and mask are semantically compatible. +// It returns the byte indexes for the address and mask so that caller can +// do bitwise operations without modifying address representation. 
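+// For example, a 16-byte IPv4-mapped address (::ffff:192.0.2.1) paired with a +// 4-byte IPv4 mask yields is=12 and ms=0, so ip[is:] and mask[ms:] cover the +// same four bytes.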
+func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { + // Find the effective starting index of the address and mask + if len(ip) == net.IPv6len && ip.To4() != nil { + is = 12 + } + if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { + ms = 12 + } + // Check if address and mask are semantically compatible + if len(ip[is:]) != len(mask[ms:]) { + err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) + } + return +} + +// GetHostPartIP returns the host portion of the ip address identified by the mask. +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective starting index of the address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) + } + + // Compute host portion + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] &= ^mask[ms+i] + } + + return out, nil +} + +// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective starting index of the address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) + } + + // Compute broadcast address + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] |= ^mask[ms+i] + } + + return out, nil +} + +// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation +func ParseCIDR(cidr string) (n *net.IPNet, e error) { + var i net.IP + if i, n, e = net.ParseCIDR(cidr); e == nil { + n.IP = i + } + return +} + +const ( + // NEXTHOP indicates a StaticRoute with an IP next hop. + NEXTHOP = iota + + // CONNECTED indicates a StaticRoute with an interface for directly connected peers. + CONNECTED +) + +// StaticRoute is a statically-provisioned IP route. +type StaticRoute struct { + Destination *net.IPNet + + RouteType int // NEXTHOP or CONNECTED + + // NextHop will be resolved by the kernel (i.e. as a loose hop). 
+ NextHop net.IP +} + +// GetCopy returns a copy of this StaticRoute structure +func (r *StaticRoute) GetCopy() *StaticRoute { + d := GetIPNetCopy(r.Destination) + nh := GetIPCopy(r.NextHop) + return &StaticRoute{Destination: d, + RouteType: r.RouteType, + NextHop: nh, + } +} + +// InterfaceStatistics represents the interface's statistics +type InterfaceStatistics struct { + RxBytes uint64 + RxPackets uint64 + RxErrors uint64 + RxDropped uint64 + TxBytes uint64 + TxPackets uint64 + TxErrors uint64 + TxDropped uint64 +} + +func (is *InterfaceStatistics) String() string { + return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d", + is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped) +} + +/****************************** + * Well-known Error Interfaces + ******************************/ + +// MaskableError is an interface for errors which can be ignored by caller +type MaskableError interface { + // Maskable makes implementer into MaskableError type + Maskable() +} + +// RetryError is an interface for errors which might get resolved through retry +type RetryError interface { + // Retry makes implementer into RetryError type + Retry() +} + +// BadRequestError is an interface for errors originated by a bad request +type BadRequestError interface { + // BadRequest makes implementer into BadRequestError type + BadRequest() +} + +// NotFoundError is an interface for errors raised because a needed resource is not available +type NotFoundError interface { + // NotFound makes implementer into NotFoundError type + NotFound() +} + +// ForbiddenError is an interface for errors which denote a valid request that cannot be honored +type ForbiddenError interface { + // Forbidden makes implementer into ForbiddenError type + Forbidden() +} + +// NoServiceError is an interface for errors returned when the required service is not available +type NoServiceError interface { + // NoService makes implementer into NoServiceError type + NoService() +} + +// TimeoutError is an interface for errors raised because of timeout +type TimeoutError interface { + // Timeout makes implementer into TimeoutError type + Timeout() +} + +// NotImplementedError is an interface for errors raised because the requested functionality is not yet implemented +type NotImplementedError interface { + // NotImplemented makes implementer into NotImplementedError type + NotImplemented() +} + +// InternalError is an interface for errors raised because of an internal error +type InternalError interface { + // Internal makes implementer into InternalError type + Internal() +} + +/****************************** + * Well-known Error Formatters + ******************************/ + +// BadRequestErrorf creates an instance of BadRequestError +func BadRequestErrorf(format string, params ...interface{}) error { + return badRequest(fmt.Sprintf(format, params...)) +} + +// NotFoundErrorf creates an instance of NotFoundError +func NotFoundErrorf(format string, params ...interface{}) error { + return notFound(fmt.Sprintf(format, params...)) +} + +// ForbiddenErrorf creates an instance of ForbiddenError +func ForbiddenErrorf(format string, params ...interface{}) error { + return forbidden(fmt.Sprintf(format, params...)) +} + +// NoServiceErrorf creates an instance of NoServiceError +func NoServiceErrorf(format string, params ...interface{}) error { + return noService(fmt.Sprintf(format, params...)) +} + +// NotImplementedErrorf 
creates an instance of NotImplementedError +func NotImplementedErrorf(format string, params ...interface{}) error { + return notImpl(fmt.Sprintf(format, params...)) +} + +// TimeoutErrorf creates an instance of TimeoutError +func TimeoutErrorf(format string, params ...interface{}) error { + return timeout(fmt.Sprintf(format, params...)) +} + +// InternalErrorf creates an instance of InternalError +func InternalErrorf(format string, params ...interface{}) error { + return internal(fmt.Sprintf(format, params...)) +} + +// InternalMaskableErrorf creates an instance of InternalError and MaskableError +func InternalMaskableErrorf(format string, params ...interface{}) error { + return maskInternal(fmt.Sprintf(format, params...)) +} + +// RetryErrorf creates an instance of RetryError +func RetryErrorf(format string, params ...interface{}) error { + return retry(fmt.Sprintf(format, params...)) +} + +/*********************** + * Internal Error Types + ***********************/ +type badRequest string + +func (br badRequest) Error() string { + return string(br) +} +func (br badRequest) BadRequest() {} + +type notFound string + +func (nf notFound) Error() string { + return string(nf) +} +func (nf notFound) NotFound() {} + +type forbidden string + +func (frb forbidden) Error() string { + return string(frb) +} +func (frb forbidden) Forbidden() {} + +type noService string + +func (ns noService) Error() string { + return string(ns) +} +func (ns noService) NoService() {} + +type timeout string + +func (to timeout) Error() string { + return string(to) +} +func (to timeout) Timeout() {} + +type notImpl string + +func (ni notImpl) Error() string { + return string(ni) +} +func (ni notImpl) NotImplemented() {} + +type internal string + +func (nt internal) Error() string { + return string(nt) +} +func (nt internal) Internal() {} + +type maskInternal string + +func (mnt maskInternal) Error() string { + return string(mnt) +} +func (mnt maskInternal) Internal() {} +func (mnt maskInternal) Maskable() {} + +type retry string + +func (r retry) Error() string { + return string(r) +} +func (r retry) Retry() {} diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 00000000000..a04f2907fed --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c30165cdd..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... 
- -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS index 5ab5d41c547..6cbabe5ef50 100644 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -4,35 +4,44 @@ # You can update this list using the following command: # -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' +# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS # Please keep the list sorted. Aaron L Adrien Bustany +Alexey Kazakov Amit Krishnan Anmol Sethi Bjørn Erik Pedersen +Brian Goff Bruno Bigras Caleb Spare Case Nelson -Chris Howey +Chris Howey Christoffer Buchholz Daniel Wagner-Hall Dave Cheney +Eric Lin Evan Phoenix Francisco Souza +Gautam Dey Hari haran -John C Barstow +Ichinose Shogo +Johannes Ebke +John C Barstow Kelvin Fo Ken-ichirou MATSUZAWA Matt Layher +Matthias Stone Nathan Youngman Nickolai Zeldovich +Oliver Bristow Patrick Paul Hammond Pawel Knap Pieter Droogendijk +Pratik Shinde Pursuit92 Riku Voipio Rob Figueiredo @@ -41,6 +50,7 @@ Slawek Ligus Soge Zhang Tiffany Jernigan Tilak Sharma +Tobias Klauser Tom Payne Travis Cline Tudor Golubenco diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index be4d7ea2c14..a438fe4b4a5 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,28 @@ # Changelog -## v1.4.7 / 2018-01-09 +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) * Tests: Fix missing verb on format string (thanks @rchiossi) @@ -10,62 +32,62 @@ * Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) * Docs: replace references to OS X with macOS -## v1.4.2 / 2016-10-10 +## [1.4.2] - 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) -## v1.4.1 / 2016-10-04 +## [1.4.1] - 2016-10-04 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) -## v1.4.0 / 2016-10-01 +## [1.4.0] - 2016-10-01 * add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) -## v1.3.1 / 2016-06-28 +## [1.3.1] - 2016-06-28 * Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) -## v1.3.0 / 2016-04-19 +## [1.3.0] - 2016-04-19 * Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) -## v1.2.10 / 2016-03-02 +## [1.2.10] - 2016-03-02 * Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) -## v1.2.9 / 2016-01-13 +## [1.2.9] - 2016-01-13 kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) -## v1.2.8 / 2015-12-17 +## [1.2.8] - 2015-12-17 * kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) * inotify: fix race in test * enable race detection for continuous integration (Linux, Mac, Windows) -## v1.2.5 / 2015-10-17 +## [1.2.5] - 2015-10-17 * inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) * inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) * kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) * kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) -## v1.2.1 / 2015-10-14 +## [1.2.1] - 2015-10-14 * kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) -## v1.2.0 
/ 2015-02-08 +## [1.2.0] - 2015-02-08 * inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) * inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) * kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) -## v1.1.1 / 2015-02-05 +## [1.1.1] - 2015-02-05 * inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) -## v1.1.0 / 2014-12-12 +## [1.1.0] - 2014-12-12 * kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) * add low-level functions @@ -77,22 +99,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v1.0.4 / 2014-09-07 +## [1.0.4] - 2014-09-07 * kqueue: add dragonfly to the build tags. * Rename source code files, rearrange code so exported APIs are at the top. * Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) -## v1.0.3 / 2014-08-19 +## [1.0.3] - 2014-08-19 * [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) -## v1.0.2 / 2014-08-17 +## [1.0.2] - 2014-08-17 * [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) -## v1.0.0 / 2014-08-15 +## [1.0.0] - 2014-08-15 * [API] Remove AddWatch on Windows, use Add. * Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) @@ -146,51 +168,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * no tests for the current implementation * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) -## v0.9.3 / 2014-12-31 +## [0.9.3] - 2014-12-31 * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v0.9.2 / 2014-08-17 +## [0.9.2] - 2014-08-17 * [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -## v0.9.1 / 2014-06-12 +## [0.9.1] - 2014-06-12 * Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) -## v0.9.0 / 2014-01-17 +## [0.9.0] - 2014-01-17 * IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) * [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) * [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-## v0.8.12 / 2013-11-13 +## [0.8.12] - 2013-11-13 * [API] Remove FD_SET and friends from Linux adapter -## v0.8.11 / 2013-11-02 +## [0.8.11] - 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) * [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) -## v0.8.10 / 2013-10-19 +## [0.8.10] - 2013-10-19 * [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) * [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) * [Doc] specify OS-specific limits in README (thanks @debrando) -## v0.8.9 / 2013-09-08 +## [0.8.9] - 2013-09-08 * [Doc] Contributing (thanks @nathany) * [Doc] update package path in example code [#63][] (thanks @paulhammond) * [Doc] GoCI badge in README (Linux only) [#60][] * [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) -## v0.8.8 / 2013-06-17 +## [0.8.8] - 2013-06-17 * [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) -## v0.8.7 / 2013-06-03 +## [0.8.7] - 2013-06-03 * [API] Make syscall flags internal * [Fix] inotify: ignore event changes @@ -198,74 +220,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] tests on Windows * lower case error messages -## v0.8.6 / 2013-05-23 +## [0.8.6] - 2013-05-23 * kqueue: Use EVT_ONLY flag on Darwin * [Doc] Update README with full example -## v0.8.5 / 2013-05-09 +## [0.8.5] - 2013-05-09 * [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) -## v0.8.4 / 2013-04-07 +## [0.8.4] - 2013-04-07 * [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) -## v0.8.3 / 2013-03-13 +## [0.8.3] - 2013-03-13 * [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) * [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) -## v0.8.2 / 2013-02-07 +## [0.8.2] - 2013-02-07 * [Doc] add Authors * [Fix] fix data races for map access [#29][] (thanks @fsouza) -## v0.8.1 / 2013-01-09 +## [0.8.1] - 2013-01-09 * [Fix] Windows path separators * [Doc] BSD License -## v0.8.0 / 2012-11-09 +## [0.8.0] - 2012-11-09 * kqueue: directory watching improvements (thanks @vmirage) * inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) * [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) -## v0.7.4 / 2012-10-09 +## [0.7.4] - 2012-10-09 * [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) * [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) * [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) * [Fix] kqueue: modify after recreation of file -## v0.7.3 / 2012-09-27 +## [0.7.3] - 2012-09-27 * [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) * [Fix] kqueue: no longer get duplicate CREATE events -## v0.7.2 / 2012-09-01 +## [0.7.2] - 2012-09-01 * kqueue: events for created directories -## v0.7.1 / 2012-07-14 +## [0.7.1] - 2012-07-14 * [Fix] for renaming files -## v0.7.0 / 2012-07-02 +## [0.7.0] - 2012-07-02 * [Feature] FSNotify flags * [Fix] inotify: Added file name back to event path -## v0.6.0 / 2012-06-06 +## [0.6.0] - 2012-06-06 * kqueue: watch files after directory created (thanks @tmc) -## v0.5.1 / 2012-05-22 +## [0.5.1] - 2012-05-22 * [Fix] inotify: remove all watches before Close() -## v0.5.0 / 2012-05-03 +## [0.5.0] - 2012-05-03 * [API] kqueue: return errors during watch instead of sending over channel * kqueue: match symlink behavior on Linux @@ -273,22 
+295,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] kqueue: handle EINTR (reported by @robfig) * [Doc] Godoc example [#1][] (thanks @davecheney) -## v0.4.0 / 2012-03-30 +## [0.4.0] - 2012-03-30 * Go 1 released: build with go tool * [Feature] Windows support using winfsnotify * Windows does not have attribute change notifications * Roll attribute notifications into IsModify -## v0.3.0 / 2012-02-19 +## [0.3.0] - 2012-02-19 * kqueue: add files when watch directory -## v0.2.0 / 2011-12-30 +## [0.2.0] - 2011-12-30 * update to latest Go weekly code -## v0.1.0 / 2011-10-19 +## [0.1.0] - 2011-10-19 * kqueue: add watch on file creation to match inotify * kqueue: create file event diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index b2629e5229c..df57b1b282c 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -12,9 +12,9 @@ Cross platform: Windows, Linux, BSD and macOS. | Adapter | OS | Status | | --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| inotify | Linux 2.6.27 or later, Android\* | Supported | +| kqueue | BSD, macOS, iOS\* | Supported | +| ReadDirectoryChangesW | Windows | Supported | | FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | | FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | | fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go index ced39cb881e..b3ac3d8f55f 100644 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build solaris // +build solaris package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 89cab046d12..0f4ee52e8aa 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !plan9 // +build !plan9 // Package fsnotify provides a platform-independent interface for file system notifications. 
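Note on the recurring fsnotify hunks above: each one adds the Go 1.17 `//go:build` expression on top of the legacy `// +build` comment instead of replacing it. A minimal sketch of the resulting file header (illustrative only, not part of the patch):

```go
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin

// Both lines must express the same constraint: Go 1.17+ reads the boolean
// //go:build expression, while older toolchains only understand the
// space-separated // +build form (gofmt keeps the two in sync).
package fsnotify
```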
diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod index ff11e13f224..54089e48b7b 100644 --- a/vendor/github.com/fsnotify/fsnotify/go.mod +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -2,4 +2,6 @@ module github.com/fsnotify/fsnotify go 1.13 -require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 +require golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c + +retract v1.5.0 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum index f60af9855da..0f478630ca0 100644 --- a/vendor/github.com/fsnotify/fsnotify/go.sum +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go index d9fd1b88a05..eb87699b5b4 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package fsnotify @@ -272,7 +273,7 @@ func (w *Watcher) readEvents() { if nameLen > 0 { // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] // The filename is padded with NULL bytes. TrimRight() gets rid of those. name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") } diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go index b33f2b4d4b7..e9ff9439f7f 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go index 86e76a3d676..368f5b790d4 100644 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 2306c4620bf..36cc3845b6e 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 870c4d6d184..98cd8476ffb 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go index 09436f31d82..c02b75f7c37 100644 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package fsnotify diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitattributes b/vendor/github.com/fsouza/go-dockerclient/.gitattributes new file mode 100644 index 00000000000..6313b56c578 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore new file mode 100644 index 00000000000..5f6b48eae0d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.gitignore @@ -0,0 +1,2 @@ +# temporary symlink for testing +testing/data/symlink diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml new file mode 100644 index 00000000000..d5b0fe128a7 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -0,0 +1,8 @@ +run: + deadline: 5m + +linters: + disable-all: true + enable: + - gofumpt + - gci diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS new file mode 100644 index 00000000000..12daa346144 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -0,0 +1,209 @@ +# This is the official list of go-dockerclient authors for copyright purposes. + +Abhishek Chanda +Adam Bell-Hanssen +Adnan Khan +Adrien Kohlbecker +Aithal +Aldrin Leal +Alex Dadgar +Alfonso Acosta +André Carvalho +Andreas Jaekle +Andrew Snodgrass +Andrews Medina +Andrey Sibiryov +Andy Goldstein +Anirudh Aithal +Antoine Brechon +Antonio Murdaca +Artem Sidorenko +Arthur Rodrigues +Ben Marini +Ben McCann +Ben Parees +Benno van den Berg +Bradley Cicenas +Brendan Fosberry +Brett Buddin +Brian Lalor +Brian P. Hamachek +Brian Palmer +Bryan Boreham +Burke Libbey +Carlos Diaz-Padron +Carson A +Cássio Botaro +Cesar Wong +Cezar Sa Espinola +Changping Chen +Charles Teinturier +Cheah Chu Yeow +cheneydeng +Chris Bednarski +Chris Stavropoulos +Christian Stewart +Christophe Mourette +Clayton Coleman +Clint Armstrong +CMGS +Colin Hebert +Craig Jellick +Damien Lespiau +Damon Wang +Dan Williams +Daniel, Dao Quang Minh +Daniel Black +Daniel Garcia +Daniel Hess +Daniel Hiltgen +Daniel Nephin +Daniel Tsui +Darren Shepherd +Dave Choi +David Huie +Dawn Chen +Denis Makogon +Derek Petersen +Dinesh Subhraveti +Drew Wells +Ed +Elias G. Schneevoigt +Erez Horev +Eric Anderson +Eric Fode +Eric J. 
Holmes +Eric Mountain +Erwin van Eyk +Ethan Mosbaugh +Ewout Prangsma +Fabio Rehm +Fatih Arslan +Faye Salwin +Felipe Oliveira +Flavia Missi +Florent Aide +Francisco Souza +Frank Groeneveld +George MacRorie +George Moura +Grégoire Delattre +Guilherme Rezende +Guillermo Álvarez Fernández +Harry Zhang +He Simei +Isaac Schnitzer +Ivan Mikushin +James Bardin +James Nugent +Jamie Snell +Januar Wayong +Jari Kolehmainen +Jason Wilder +Jawher Moussa +Jean-Baptiste Dalido +Jeff Mitchell +Jeffrey Hulten +Jen Andre +Jérôme Laurens +Jim Minter +Johan Euphrosine +Johannes Scheuermann +John Hughes +Jorge Marey +Julian Einwag +Kamil Domanski +Karan Misra +Ken Herner +Kevin Lin +Kevin Xu +Kim, Hirokuni +Kostas Lekkas +Kyle Allan +Kyle Quest +Yunhee Lee +Liron Levin +Lior Yankovich +Liu Peng +Lorenz Leutgeb +Lucas Clemente +Lucas Weiblen +Lyon Hill +Mantas Matelis +Manuel Vogel +Marguerite des Trois Maisons +Mariusz Borsa +Martin Sweeney +Máximo Cuadros Ortiz +Michael Schmatz +Michal Fojtik +Mike Dillon +Mrunal Patel +Nate Jones +Nathan Pemberton +Nguyen Sy Thanh Son +Nicholas Van Wiggeren +Nick Ethier +niko83 +Omeid Matten +Orivej Desh +Paul Bellamy +Paul Morie +Paul Weil +Peng Yin +Peter Edge +Peter Jihoon Kim +Peter Teich +Phil Lu +Philippe Lafoucrière +Radek Simko +Rafe Colton +Randy Fay +Raphaël Pinson +Reed Allman +RJ Catalano +Rob Miller +Robbert Klarenbeek +Robert Williamson +Roman Khlystik +Russell Haering +Salvador Gironès +Sam Rijs +Sami Wagiaalla +Samuel Archambault +Samuel Karp +Sebastian Borza +Sergey Ponomarev +Seth Jennings +Shane Xie +Silas Sewell +Simon Eskildsen +Simon Menke +Skolos +Soulou +Sridhar Ratnakumar +Steven Jack +Summer Mousa +Sunjin Lee +Sunny +Swaroop Ramachandra +Tarsis Azevedo +Tim Schindler +Timothy St. Clair +Tobi Knaup +Tom Wilkie +Tomas Knappek +Tonic +ttyh061 +Umut Çömlekçioğlu +upccup +Victor Marmol +Vijay Krishnan +Vincenzo Prignano +Vlad Alexandru Ionescu +Weitao Zhou +Wiliam Souza +Ye Yin +Yosuke Otosu +Yu, Zou +Yuriy Bogdanov diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE new file mode 100644 index 00000000000..db092935f50 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE @@ -0,0 +1,6 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +You can find the Docker license at the following link: +https://raw.githubusercontent.com/docker/docker/HEAD/LICENSE diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE new file mode 100644 index 00000000000..20837167a9a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) go-dockerclient authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile new file mode 100644 index 00000000000..2f5d9fcc6a9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -0,0 +1,30 @@ +ifeq "$(strip $(shell go env GOARCH))" "amd64" +RACE_FLAG := -race +endif + +.PHONY: test +test: pretest gotest + +.PHONY: golangci-lint +golangci-lint: + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + golangci-lint run + +.PHONY: staticcheck +staticcheck: + go install honnef.co/go/tools/cmd/staticcheck@master + staticcheck ./... + +.PHONY: lint +lint: golangci-lint staticcheck + +.PHONY: pretest +pretest: lint + +.PHONY: gotest +gotest: + go test $(RACE_FLAG) -vet all ./... + +.PHONY: integration +integration: + go test -tags docker_integration -run TestIntegration -v diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md new file mode 100644 index 00000000000..a9a74fbc445 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/README.md @@ -0,0 +1,128 @@ +# go-dockerclient + +[![Build Status](https://github.com/fsouza/go-dockerclient/workflows/Build/badge.svg)](https://github.com/fsouza/go-dockerclient/actions?query=branch:main+workflow:Build) +[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/fsouza/go-dockerclient) + +This package presents a client for the Docker remote API. It also provides +support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/). + +This package also provides support for docker's network API, which is a simple +passthrough to the libnetwork remote API. + +For more details, check the [remote API +documentation](https://docs.docker.com/engine/api/latest/). + +## Difference between go-dockerclient and the official SDK + +Link for the official SDK: https://docs.docker.com/develop/sdk/ + +go-dockerclient was created before Docker had an official Go SDK and is +still maintained and active because it's still used out there. New features in +the Docker API do not get automatically implemented here: it's based on demand, +if someone wants it, they can file an issue or a PR and the feature may get +implemented/merged. + +For new projects, using the official SDK is probably more appropriate as +go-dockerclient lags behind the official SDK. + +When using the official SDK, keep in mind that because of how its +dependencies are organized, you may need some extra steps in order to be able +to import it in your projects (see +[#784](https://github.com/fsouza/go-dockerclient/issues/784) and +[moby/moby#28269](https://github.com/moby/moby/issues/28269)). 
+ +## Example + +```go +package main + +import ( + "fmt" + + docker "github.com/fsouza/go-dockerclient" +) + +func main() { + client, err := docker.NewClientFromEnv() + if err != nil { + panic(err) + } + imgs, err := client.ListImages(docker.ListImagesOptions{All: false}) + if err != nil { + panic(err) + } + for _, img := range imgs { + fmt.Println("ID: ", img.ID) + fmt.Println("RepoTags: ", img.RepoTags) + fmt.Println("Created: ", img.Created) + fmt.Println("Size: ", img.Size) + fmt.Println("VirtualSize: ", img.VirtualSize) + fmt.Println("ParentId: ", img.ParentID) + } +} +``` + +## Using with TLS + +In order to instantiate the client for a TLS-enabled daemon, you should use +NewTLSClient, passing the endpoint and path for key and certificates as +parameters. + +```go +package main + +import ( + "fmt" + "os" + + docker "github.com/fsouza/go-dockerclient" +) + +func main() { + const endpoint = "tcp://[ip]:[port]" + path := os.Getenv("DOCKER_CERT_PATH") + ca := fmt.Sprintf("%s/ca.pem", path) + cert := fmt.Sprintf("%s/cert.pem", path) + key := fmt.Sprintf("%s/key.pem", path) + client, _ := docker.NewTLSClient(endpoint, cert, key, ca) + _ = client // use client +} +``` + +If using [docker-machine](https://docs.docker.com/machine/), or another +application that exports environment variables `DOCKER_HOST`, +`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, `DOCKER_API_VERSION`, you can use +NewClientFromEnv. + + +```go +package main + +import ( + docker "github.com/fsouza/go-dockerclient" +) + +func main() { + client, err := docker.NewClientFromEnv() + if err != nil { + // handle err + } + _ = client // use client +} +``` + +See the documentation for more details. + +## Developing + +All development commands can be seen in the [Makefile](Makefile). + +Committed code must pass: + +* [golangci-lint](https://github.com/golangci/golangci-lint) +* [go test](https://golang.org/cmd/go/#hdr-Test_packages) +* [staticcheck](https://staticcheck.io/) + +Running ``make test`` will run all checks, as well as install any required +dependencies. diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go new file mode 100644 index 00000000000..bc949dc3590 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -0,0 +1,385 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path" + "strings" +) + +// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. +var ErrCannotParseDockercfg = errors.New("failed to read authentication from dockercfg") + +// AuthConfiguration represents authentication options to use in the PushImage +// method. It represents the authentication in the Docker index server. 
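+// A typical value (illustrative only): AuthConfiguration{Username: "user", +// Password: "secret", ServerAddress: "https://index.docker.io/v1/"}.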
+type AuthConfiguration struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken can be supplied with the identitytoken response of the AuthCheck call + // see https://pkg.go.dev/github.com/docker/docker/api/types?tab=doc#AuthConfig + // It can be used in place of the password, not in conjunction with it + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken can be supplied with the registrytoken + RegistryToken string `json:"registrytoken,omitempty"` +} + +func (c AuthConfiguration) isEmpty() bool { + return c == AuthConfiguration{} +} + +func (c AuthConfiguration) headerKey() string { + return "X-Registry-Auth" +} + +// AuthConfigurations represents authentication options to use for the +// PushImage method accommodating the new X-Registry-Config header +type AuthConfigurations struct { + Configs map[string]AuthConfiguration `json:"configs"` +} + +func (c AuthConfigurations) isEmpty() bool { + return len(c.Configs) == 0 +} + +func (AuthConfigurations) headerKey() string { + return "X-Registry-Config" +} + +// merge updates the configuration. If a key is defined in both maps, the one +// in c.Configs takes precedence. +func (c *AuthConfigurations) merge(other AuthConfigurations) { + for k, v := range other.Configs { + if c.Configs == nil { + c.Configs = make(map[string]AuthConfiguration) + } + if _, ok := c.Configs[k]; !ok { + c.Configs[k] = v + } + } +} + +// AuthConfigurations119 is used to serialize a set of AuthConfigurations +// for Docker API >= 1.19. +type AuthConfigurations119 map[string]AuthConfiguration + +func (c AuthConfigurations119) isEmpty() bool { + return len(c) == 0 +} + +func (c AuthConfigurations119) headerKey() string { + return "X-Registry-Config" +} + +// dockerConfig represents a registry authentication configuration from the +// .dockercfg file. +type dockerConfig struct { + Auth string `json:"auth"` + Email string `json:"email"` + IdentityToken string `json:"identitytoken"` + RegistryToken string `json:"registrytoken"` +} + +// NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON +// in the same format as the .dockercfg file. +func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + return NewAuthConfigurations(r) +} + +func cfgPaths(dockerConfigEnv string, homeEnv string) []string { + if dockerConfigEnv != "" { + return []string{ + path.Join(dockerConfigEnv, "plaintext-passwords.json"), + path.Join(dockerConfigEnv, "config.json"), + } + } + if homeEnv != "" { + return []string{ + path.Join(homeEnv, ".docker", "plaintext-passwords.json"), + path.Join(homeEnv, ".docker", "config.json"), + path.Join(homeEnv, ".dockercfg"), + } + } + return nil +} + +// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from system +// config files. 
The following files are checked in the order listed: +// +// If the environment variable DOCKER_CONFIG is set to a non-empty string: +// +// - $DOCKER_CONFIG/plaintext-passwords.json +// - $DOCKER_CONFIG/config.json +// +// Otherwise, it looks for files in the $HOME directory and the legacy +// location: +// +// - $HOME/.docker/plaintext-passwords.json +// - $HOME/.docker/config.json +// - $HOME/.dockercfg +func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { + pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME")) + if len(pathsToTry) < 1 { + return nil, errors.New("no docker configuration found") + } + return newAuthConfigurationsFromDockerCfg(pathsToTry) +} + +func newAuthConfigurationsFromDockerCfg(pathsToTry []string) (*AuthConfigurations, error) { + var result *AuthConfigurations + var auths *AuthConfigurations + var err error + for _, path := range pathsToTry { + auths, err = NewAuthConfigurationsFromFile(path) + if err != nil { + continue + } + + if result == nil { + result = auths + } else { + result.merge(*auths) + } + } + + if result != nil { + return result, nil + } + return result, err +} + +// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the +// same format as the .dockercfg file. +func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { + var auth *AuthConfigurations + confs, err := parseDockerConfig(r) + if err != nil { + return nil, err + } + auth, err = authConfigs(confs) + if err != nil { + return nil, err + } + return auth, nil +} + +func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { + buf := new(bytes.Buffer) + buf.ReadFrom(r) + byteData := buf.Bytes() + + confsWrapper := struct { + Auths map[string]dockerConfig `json:"auths"` + }{} + if err := json.Unmarshal(byteData, &confsWrapper); err == nil { + if len(confsWrapper.Auths) > 0 { + return confsWrapper.Auths, nil + } + } + + var confs map[string]dockerConfig + if err := json.Unmarshal(byteData, &confs); err != nil { + return nil, err + } + return confs, nil +} + +// authConfigs converts a dockerConfigs map to a AuthConfigurations object. +func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { + c := &AuthConfigurations{ + Configs: make(map[string]AuthConfiguration), + } + + for reg, conf := range confs { + if conf.Auth == "" { + continue + } + + // support both padded and unpadded encoding + data, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + data, err = base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(conf.Auth) + } + if err != nil { + return nil, errors.New("error decoding plaintext credentials") + } + + userpass := strings.SplitN(string(data), ":", 2) + if len(userpass) != 2 { + return nil, ErrCannotParseDockercfg + } + + authConfig := AuthConfiguration{ + Email: conf.Email, + Username: userpass[0], + Password: userpass[1], + ServerAddress: reg, + } + + // if identitytoken provided then zero the password and set it + if conf.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = conf.IdentityToken + } + + // if registrytoken provided then zero the password and set it + if conf.RegistryToken != "" { + authConfig.Password = "" + authConfig.RegistryToken = conf.RegistryToken + } + c.Configs[reg] = authConfig + } + + return c, nil +} + +// AuthStatus returns the authentication status for Docker API versions >= 1.23. 
+type AuthStatus struct { + Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` + IdentityToken string `json:"IdentityToken,omitempty" yaml:"IdentityToken,omitempty" toml:"IdentityToken,omitempty"` +} + +// AuthCheck validates the given credentials. It returns nil if successful. +// +// For Docker API versions >= 1.23, the AuthStatus struct will be populated, otherwise it will be empty. +// +// See https://goo.gl/6nsZkH for more details. +func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { + var authStatus AuthStatus + if conf == nil { + return authStatus, errors.New("conf is nil") + } + resp, err := c.do(http.MethodPost, "/auth", doOptions{data: conf}) + if err != nil { + return authStatus, err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return authStatus, err + } + if len(data) == 0 { + return authStatus, nil + } + if err := json.Unmarshal(data, &authStatus); err != nil { + return authStatus, err + } + return authStatus, nil +} + +// helperCredentials represents credentials returned by a credential helper +type helperCredentials struct { + Username string `json:"Username,omitempty"` + Secret string `json:"Secret,omitempty"` +} + +// NewAuthConfigurationsFromCredsHelpers returns AuthConfigurations from +// installed credentials helpers +func NewAuthConfigurationsFromCredsHelpers(registry string) (*AuthConfiguration, error) { + // Load docker configuration file in order to find a possible helper provider + pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME")) + if len(pathsToTry) < 1 { + return nil, errors.New("no docker configuration found") + } + + provider, err := getHelperProviderFromDockerCfg(pathsToTry, registry) + if err != nil { + return nil, err + } + + c, err := getCredentialsFromHelper(provider, registry) + if err != nil { + return nil, err + } + + creds := new(AuthConfiguration) + creds.Username = c.Username + creds.Password = c.Secret + return creds, nil +} + +func getHelperProviderFromDockerCfg(pathsToTry []string, registry string) (string, error) { + for _, path := range pathsToTry { + content, err := ioutil.ReadFile(path) + if err != nil { + // if we can't read the file keep going + continue + } + + provider, err := parseCredsDockerConfig(content, registry) + if err != nil { + continue + } + if provider != "" { + return provider, nil + } + } + return "", errors.New("no docker credentials provider found") +} + +func parseCredsDockerConfig(config []byte, registry string) (string, error) { + creds := struct { + CredsStore string `json:"credsStore,omitempty"` + CredHelpers map[string]string `json:"credHelpers,omitempty"` + }{} + err := json.Unmarshal(config, &creds) + if err != nil { + return "", err + } + + provider, ok := creds.CredHelpers[registry] + if ok { + return provider, nil + } + return creds.CredsStore, nil +} + +// Run and parse the found credential helper +func getCredentialsFromHelper(provider string, registry string) (*helperCredentials, error) { + helpercreds, err := runDockerCredentialsHelper(provider, registry) + if err != nil { + return nil, err + } + + c := new(helperCredentials) + err = json.Unmarshal(helpercreds, c) + if err != nil { + return nil, err + } + + return c, nil +} + +func runDockerCredentialsHelper(provider string, registry string) ([]byte, error) { + cmd := exec.Command("docker-credential-"+provider, "get") + + var stdout bytes.Buffer + + cmd.Stdin = bytes.NewBuffer([]byte(registry)) + cmd.Stdout = &stdout + + err := cmd.Run() + if 
err != nil { + return nil, err + } + + return stdout.Bytes(), nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/fsouza/go-dockerclient/change.go new file mode 100644 index 00000000000..3f936b22330 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/change.go @@ -0,0 +1,43 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import "fmt" + +// ChangeType is a type for constants indicating the type of change +// in a container +type ChangeType int + +const ( + // ChangeModify is the ChangeType for container modifications + ChangeModify ChangeType = iota + + // ChangeAdd is the ChangeType for additions to a container + ChangeAdd + + // ChangeDelete is the ChangeType for deletions from a container + ChangeDelete +) + +// Change represents a change in a container. +// +// See https://goo.gl/Wo0JJp for more details. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go new file mode 100644 index 00000000000..d0814a5c0bd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -0,0 +1,1156 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package docker provides a client for the Docker remote API. +// +// See https://goo.gl/o2v3rk for more details on the remote API. +package docker + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stdcopy" +) + +const ( + userAgent = "go-dockerclient" + + unixProtocol = "unix" + namedPipeProtocol = "npipe" +) + +var ( + // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. + ErrInvalidEndpoint = errors.New("invalid endpoint") + + // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. + ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") + + // ErrInactivityTimeout is returned when a streamable call has been inactive for some time. + ErrInactivityTimeout = errors.New("inactivity time exceeded timeout") + + apiVersion112, _ = NewAPIVersion("1.12") + apiVersion118, _ = NewAPIVersion("1.18") + apiVersion119, _ = NewAPIVersion("1.19") + apiVersion121, _ = NewAPIVersion("1.21") + apiVersion124, _ = NewAPIVersion("1.24") + apiVersion125, _ = NewAPIVersion("1.25") + apiVersion135, _ = NewAPIVersion("1.35") +) + +// APIVersion is an internal representation of a version of the Remote API. +type APIVersion []int + +// NewAPIVersion returns an instance of APIVersion for the given string. +// +// The given string must be in the form <major>.<minor>.<patch>, where <major>, +// <minor> and <patch> are integer numbers. 
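+// For example, NewAPIVersion("1.24") yields APIVersion{1, 24}; anything after +// a dash, as in "1.24-dev", is ignored by the parser.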
+
+// APIVersion is an internal representation of a version of the Remote API.
+type APIVersion []int
+
+// NewAPIVersion returns an instance of APIVersion for the given string.
+//
+// The given string must be in the form <major>.<minor>.<patch>, where <major>,
+// <minor> and <patch> are integer numbers.
+func NewAPIVersion(input string) (APIVersion, error) {
+	if !strings.Contains(input, ".") {
+		return nil, fmt.Errorf("unable to parse version %q", input)
+	}
+	raw := strings.Split(input, "-")
+	arr := strings.Split(raw[0], ".")
+	ret := make(APIVersion, len(arr))
+	var err error
+	for i, val := range arr {
+		ret[i], err = strconv.Atoi(val)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse version %q: %q is not an integer", input, val)
+		}
+	}
+	return ret, nil
+}
+
+func (version APIVersion) String() string {
+	parts := make([]string, len(version))
+	for i, val := range version {
+		parts[i] = strconv.Itoa(val)
+	}
+	return strings.Join(parts, ".")
+}
+
+// LessThan is a function for comparing APIVersion structs.
+func (version APIVersion) LessThan(other APIVersion) bool {
+	return version.compare(other) < 0
+}
+
+// LessThanOrEqualTo is a function for comparing APIVersion structs.
+func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
+	return version.compare(other) <= 0
+}
+
+// GreaterThan is a function for comparing APIVersion structs.
+func (version APIVersion) GreaterThan(other APIVersion) bool {
+	return version.compare(other) > 0
+}
+
+// GreaterThanOrEqualTo is a function for comparing APIVersion structs.
+func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
+	return version.compare(other) >= 0
+}
+
+func (version APIVersion) compare(other APIVersion) int {
+	for i, v := range version {
+		if i <= len(other)-1 {
+			otherVersion := other[i]
+
+			if v < otherVersion {
+				return -1
+			} else if v > otherVersion {
+				return 1
+			}
+		}
+	}
+	if len(version) > len(other) {
+		return 1
+	}
+	if len(version) < len(other) {
+		return -1
+	}
+	return 0
+}
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+	SkipServerVersionCheck bool
+	HTTPClient             *http.Client
+	TLSConfig              *tls.Config
+	Dialer                 Dialer
+
+	endpoint            string
+	endpointURL         *url.URL
+	eventMonitor        *eventMonitoringState
+	requestedAPIVersion APIVersion
+	serverAPIVersion    APIVersion
+	expectedAPIVersion  APIVersion
+}
+
+// Dialer is an interface that allows network connections to be dialed
+// (net.Dialer fulfills this interface) and named pipes (a shim using
+// winio.DialPipe)
+type Dialer interface {
+	Dial(network, address string) (net.Conn, error)
+}
+
+// NewClient returns a Client instance ready for communication with the given
+// server endpoint. It will use the latest remote API version available in the
+// server.
+func NewClient(endpoint string) (*Client, error) {
+	client, err := NewVersionedClient(endpoint, "")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
+
+// NewTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates. It will use the latest remote API version
+// available in the server.
+func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
+	client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
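// Illustrative comparison semantics (a minimal sketch, not part of the
// vendored sources): APIVersion compares component-by-component as integers,
// so "1.9" is less than "1.24", and a shorter version compares lower when all
// shared components are equal.
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	v19, _ := docker.NewAPIVersion("1.9")
	v124, _ := docker.NewAPIVersion("1.24")

	fmt.Println(v19.LessThan(v124))             // true: 9 < 24 as integers, not as strings
	fmt.Println(v124.GreaterThanOrEqualTo(v19)) // true
	fmt.Println(v124.String())                  // "1.24"
}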
+
+// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file). It will use the latest remote API version available in the server.
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
+	client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
+
+// NewVersionedClient returns a Client instance ready for communication with
+// the given server endpoint, using a specific remote API version.
+func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
+	u, err := parseEndpoint(endpoint, false)
+	if err != nil {
+		return nil, err
+	}
+	var requestedAPIVersion APIVersion
+	if strings.Contains(apiVersionString, ".") {
+		requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+		if err != nil {
+			return nil, err
+		}
+	}
+	c := &Client{
+		HTTPClient:          defaultClient(),
+		Dialer:              &net.Dialer{},
+		endpoint:            endpoint,
+		endpointURL:         u,
+		eventMonitor:        new(eventMonitoringState),
+		requestedAPIVersion: requestedAPIVersion,
+	}
+	c.initializeNativeClient(defaultTransport)
+	return c, nil
+}
+
+// WithTransport replaces the underlying HTTP client of the Docker Client by
+// accepting a function that returns a pointer to a transport object.
+func (c *Client) WithTransport(trFunc func() *http.Transport) {
+	c.initializeNativeClient(trFunc)
+}
+
+// NewVersionnedTLSClient is like NewVersionedTLSClient, but with an extra n.
+//
+// Deprecated: Use NewVersionedTLSClient instead.
+func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+	return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
+}
+
+// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates, using a specific remote API version.
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+	var certPEMBlock []byte
+	var keyPEMBlock []byte
+	var caPEMCert []byte
+	if _, err := os.Stat(cert); !os.IsNotExist(err) {
+		certPEMBlock, err = ioutil.ReadFile(cert)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if _, err := os.Stat(key); !os.IsNotExist(err) {
+		keyPEMBlock, err = ioutil.ReadFile(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if _, err := os.Stat(ca); !os.IsNotExist(err) {
+		caPEMCert, err = ioutil.ReadFile(ca)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
+}
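// Illustrative TLS construction (a minimal sketch, not part of the vendored
// sources): the certificate paths below follow the docker CLI convention of
// cert.pem/key.pem/ca.pem in a certificate directory, and the endpoint and
// API version are example values.
package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewVersionedTLSClient(
		"tcp://192.168.99.100:2376", // example daemon endpoint
		"/home/user/.docker/cert.pem",
		"/home/user/.docker/key.pem",
		"/home/user/.docker/ca.pem",
		"1.24", // pin a specific remote API version
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("TLS connection established")
}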
+
+// NewClientFromEnv returns a Client instance ready for communication created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH,
+// and DOCKER_API_VERSION.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+// See https://github.com/moby/moby/blob/28d7dba41d0c0d9c7f0dafcc79d3c59f2b3f5dc3/client/options.go#L51
+func NewClientFromEnv() (*Client, error) {
+	apiVersionString := os.Getenv("DOCKER_API_VERSION")
+	client, err := NewVersionedClientFromEnv(apiVersionString)
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = apiVersionString == ""
+	return client, nil
+}
+
+// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
+// and using a specific remote API version.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
+	dockerEnv, err := getDockerEnv()
+	if err != nil {
+		return nil, err
+	}
+	dockerHost := dockerEnv.dockerHost
+	if dockerEnv.dockerTLSVerify {
+		parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
+		}
+		cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
+		key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
+		ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
+		return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
+	}
+	return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
+}
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+	u, err := parseEndpoint(endpoint, true)
+	if err != nil {
+		return nil, err
+	}
+	var requestedAPIVersion APIVersion
+	if strings.Contains(apiVersionString, ".") {
+		requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+		if err != nil {
+			return nil, err
+		}
+	}
+	tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12}
+	if certPEMBlock != nil && keyPEMBlock != nil {
+		tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{tlsCert}
+	}
+	if caPEMCert == nil {
+		tlsConfig.InsecureSkipVerify = true
+	} else {
+		caPool := x509.NewCertPool()
+		if !caPool.AppendCertsFromPEM(caPEMCert) {
+			return nil, errors.New("could not add RootCA pem")
+		}
+		tlsConfig.RootCAs = caPool
+	}
+	tr := defaultTransport()
+	tr.TLSClientConfig = tlsConfig
+	c := &Client{
+		HTTPClient:          &http.Client{Transport: tr},
+		TLSConfig:           tlsConfig,
+		Dialer:              &net.Dialer{},
+		endpoint:            endpoint,
+		endpointURL:         u,
+		eventMonitor:        new(eventMonitoringState),
+		requestedAPIVersion: requestedAPIVersion,
+	}
+	c.initializeNativeClient(defaultTransport)
+	return c, nil
+}
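// Illustrative environment-driven construction (a minimal sketch, not part of
// the vendored sources): NewClientFromEnv honors DOCKER_HOST, DOCKER_TLS_VERIFY,
// DOCKER_CERT_PATH and DOCKER_API_VERSION, falling back to the platform default
// socket when DOCKER_HOST is unset.
package main

import (
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Guard the ping with a client-side timeout in case the daemon is wedged.
	client.SetTimeout(10 * time.Second)
	if err := client.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("daemon reachable at", client.Endpoint())
}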
+
+// SetTimeout takes a timeout and applies it to the HTTPClient. It should not
+// be called concurrently with any other Client methods.
+func (c *Client) SetTimeout(t time.Duration) {
+	if c.HTTPClient != nil {
+		c.HTTPClient.Timeout = t
+	}
+}
+
+func (c *Client) checkAPIVersion() error {
+	serverAPIVersionString, err := c.getServerAPIVersionString()
+	if err != nil {
+		return err
+	}
+	c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+	if err != nil {
+		return err
+	}
+	if c.requestedAPIVersion == nil {
+		c.expectedAPIVersion = c.serverAPIVersion
+	} else {
+		c.expectedAPIVersion = c.requestedAPIVersion
+	}
+	return nil
+}
+
+// Endpoint returns the current endpoint. It's useful for getting the endpoint
+// when using functions that get this data from the environment (like
+// NewClientFromEnv).
+func (c *Client) Endpoint() string {
+	return c.endpoint
+}
+
+// Ping pings the docker server
+//
+// See https://goo.gl/wYfgY1 for more details.
+func (c *Client) Ping() error {
+	return c.PingWithContext(context.TODO())
+}
+
+// PingWithContext pings the docker server
+// The context object can be used to cancel the ping request.
+//
+// See https://goo.gl/wYfgY1 for more details.
+func (c *Client) PingWithContext(ctx context.Context) error {
+	path := "/_ping"
+	resp, err := c.do(http.MethodGet, path, doOptions{context: ctx})
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return newError(resp)
+	}
+	resp.Body.Close()
+	return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+	resp, err := c.do(http.MethodGet, "/version", doOptions{})
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+	}
+	var versionResponse map[string]interface{}
+	if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
+		return "", err
+	}
+	if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+		return version, nil
+	}
+	return "", nil
+}
+
+type doOptions struct {
+	data      interface{}
+	forceJSON bool
+	headers   map[string]string
+	context   context.Context
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
+	var params io.Reader
+	if doOptions.data != nil || doOptions.forceJSON {
+		buf, err := json.Marshal(doOptions.data)
+		if err != nil {
+			return nil, err
+		}
+		params = bytes.NewBuffer(buf)
+	}
+	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+		err := c.checkAPIVersion()
+		if err != nil {
+			return nil, err
+		}
+	}
+	protocol := c.endpointURL.Scheme
+	var u string
+	switch protocol {
+	case unixProtocol, namedPipeProtocol:
+		u = c.getFakeNativeURL(path)
+	default:
+		u = c.getURL(path)
+	}
+
+	req, err := http.NewRequest(method, u, params)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", userAgent)
+	if doOptions.data != nil {
+		req.Header.Set("Content-Type", "application/json")
+	} else if method == http.MethodPost {
+		req.Header.Set("Content-Type", "plain/text")
+	}
+
+	for k, v := range doOptions.headers {
+		req.Header.Set(k, v)
+	}
+
+	ctx := doOptions.context
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return nil, ErrConnectionRefused
+		}
+
+		return nil, chooseError(ctx, err)
+	}
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		return nil, newError(resp)
+	}
+	return resp, nil
+}
+
+type streamOptions struct {
+	setRawTerminal bool
+	rawJSONStream  bool
+	useJSONDecoder bool
+	headers        map[string]string
+	in             io.Reader
+	stdout         io.Writer
+	stderr         io.Writer
+	reqSent        chan struct{}
+	// timeout is the initial connection timeout
+	timeout time.Duration
+	// inactivityTimeout is the timeout applied while no data is received;
+	// it's reset every time new data arrives
+	inactivityTimeout time.Duration
+	context           context.Context
+}
+
+func chooseError(ctx context.Context, err error) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+		return err
+	}
+}
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+	if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil {
+		streamOptions.in = bytes.NewReader(nil)
+	}
+	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+		err := c.checkAPIVersion()
+		if err != nil {
+			return err
+		}
+	}
+	return c.streamURL(method, c.getURL(path), streamOptions)
+}
+
+func (c *Client) streamURL(method, url string, streamOptions streamOptions) error {
+	if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil {
+		streamOptions.in = bytes.NewReader(nil)
+	}
+	if !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+		err := c.checkAPIVersion()
+		if err != nil {
+			return err
+		}
+	}
+
+	// make a sub-context so that our active cancellation does not affect parent
+	ctx := streamOptions.context
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	subCtx, cancelRequest := context.WithCancel(ctx)
+	defer cancelRequest()
+
+	req, err := http.NewRequestWithContext(ctx, method, url, streamOptions.in)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("User-Agent", userAgent)
+	if method == http.MethodPost {
+		req.Header.Set("Content-Type", "plain/text")
+	}
+	for key, val := range streamOptions.headers {
+		req.Header.Set(key, val)
+	}
+	var resp *http.Response
+	protocol := c.endpointURL.Scheme
+	address := c.endpointURL.Path
+	if streamOptions.stdout == nil {
+		streamOptions.stdout = ioutil.Discard
+	}
+	if streamOptions.stderr == nil {
+		streamOptions.stderr = ioutil.Discard
+	}
+
+	if protocol == unixProtocol || protocol == namedPipeProtocol {
+		var dial net.Conn
+		dial, err = c.Dialer.Dial(protocol, address)
+		if err != nil {
+			return err
+		}
+		go func() {
+			<-subCtx.Done()
+			dial.Close()
+		}()
+		breader := bufio.NewReader(dial)
+		err = req.Write(dial)
+		if err != nil {
+			return chooseError(subCtx, err)
+		}
+
+		// ReadResponse may hang if server does not reply
+		if streamOptions.timeout > 0 {
+			dial.SetDeadline(time.Now().Add(streamOptions.timeout))
+		}
+
+		if streamOptions.reqSent != nil {
+			close(streamOptions.reqSent)
+		}
+		if resp, err = http.ReadResponse(breader, req); err != nil {
+			// Cancel timeout for future I/O operations
+			if streamOptions.timeout > 0 {
+				dial.SetDeadline(time.Time{})
+			}
+			if strings.Contains(err.Error(), "connection refused") {
+				return ErrConnectionRefused
+			}
+
+			return chooseError(subCtx, err)
+		}
+		defer resp.Body.Close()
+	} else {
+		if resp, err = c.HTTPClient.Do(req.WithContext(subCtx)); err != nil {
+			if strings.Contains(err.Error(), "connection refused") {
+				return ErrConnectionRefused
+			}
+			return chooseError(subCtx, err)
+		}
+		defer resp.Body.Close()
+		if streamOptions.reqSent != nil {
+			close(streamOptions.reqSent)
+		}
+	}
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		return newError(resp)
+	}
+	var canceled uint32
+	if streamOptions.inactivityTimeout > 0 {
+		var ch chan<- struct{}
+		resp.Body, ch = handleInactivityTimeout(resp.Body,
streamOptions.inactivityTimeout, cancelRequest, &canceled) + defer close(ch) + } + err = handleStreamResponse(resp, &streamOptions) + if err != nil { + if atomic.LoadUint32(&canceled) != 0 { + return ErrInactivityTimeout + } + return chooseError(subCtx, err) + } + return nil +} + +func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error { + var err error + if !streamOptions.useJSONDecoder && resp.Header.Get("Content-Type") != "application/json" { + if streamOptions.setRawTerminal { + _, err = io.Copy(streamOptions.stdout, resp.Body) + } else { + _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) + } + return err + } + // if we want to get raw json stream, just copy it back to output + // without decoding it + if streamOptions.rawJSONStream { + _, err = io.Copy(streamOptions.stdout, resp.Body) + return err + } + if st, ok := streamOptions.stdout.(stream); ok { + err = jsonmessage.DisplayJSONMessagesToStream(resp.Body, st, nil) + } else { + err = jsonmessage.DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil) + } + return err +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +type proxyReader struct { + io.ReadCloser + calls uint64 +} + +func (p *proxyReader) callCount() uint64 { + return atomic.LoadUint64(&p.calls) +} + +func (p *proxyReader) Read(data []byte) (int, error) { + atomic.AddUint64(&p.calls, 1) + return p.ReadCloser.Read(data) +} + +func handleInactivityTimeout(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) { + done := make(chan struct{}) + proxyReader := &proxyReader{ReadCloser: reader} + go func() { + var lastCallCount uint64 + for { + select { + case <-time.After(timeout): + case <-done: + return + } + curCallCount := proxyReader.callCount() + if curCallCount == lastCallCount { + atomic.AddUint32(canceled, 1) + cancelRequest() + return + } + lastCallCount = curCallCount + } + }() + return proxyReader, done +} + +type hijackOptions struct { + success chan struct{} + setRawTerminal bool + in io.Reader + stdout io.Writer + stderr io.Writer + data interface{} +} + +// CloseWaiter is an interface with methods for closing the underlying resource +// and then waiting for it to finish processing. 
+type CloseWaiter interface { + io.Closer + Wait() error +} + +type waiterFunc func() error + +func (w waiterFunc) Wait() error { return w() } + +type closerFunc func() error + +func (c closerFunc) Close() error { return c() } + +func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) { + if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return nil, err + } + } + var params io.Reader + if hijackOptions.data != nil { + buf, err := json.Marshal(hijackOptions.data) + if err != nil { + return nil, err + } + params = bytes.NewBuffer(buf) + } + req, err := http.NewRequest(method, c.getURL(path), params) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != unixProtocol && protocol != namedPipeProtocol { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + if c.TLSConfig != nil && protocol != unixProtocol && protocol != namedPipeProtocol { + netDialer, ok := c.Dialer.(*net.Dialer) + if !ok { + return nil, ErrTLSNotSupported + } + dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig) + if err != nil { + return nil, err + } + } else { + dial, err = c.Dialer.Dial(protocol, address) + if err != nil { + return nil, err + } + } + + errs := make(chan error, 1) + quit := make(chan struct{}) + go func() { + //lint:ignore SA1019 the alternative doesn't quite work, so keep using the deprecated thing. + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + clientconn.Do(req) + if hijackOptions.success != nil { + hijackOptions.success <- struct{}{} + <-hijackOptions.success + } + rwc, br := clientconn.Hijack() + defer rwc.Close() + + errChanOut := make(chan error, 1) + errChanIn := make(chan error, 2) + if hijackOptions.stdout == nil && hijackOptions.stderr == nil { + close(errChanOut) + } else { + // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set. 
+ // Otherwise, if the only stream you care about is stdin, your attach session + // will "hang" until the container terminates, even though you're not reading + // stdout/stderr + if hijackOptions.stdout == nil { + hijackOptions.stdout = ioutil.Discard + } + if hijackOptions.stderr == nil { + hijackOptions.stderr = ioutil.Discard + } + + go func() { + defer func() { + if hijackOptions.in != nil { + if closer, ok := hijackOptions.in.(io.Closer); ok { + closer.Close() + } + errChanIn <- nil + } + }() + + var err error + if hijackOptions.setRawTerminal { + _, err = io.Copy(hijackOptions.stdout, br) + } else { + _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) + } + errChanOut <- err + }() + } + + go func() { + var err error + if hijackOptions.in != nil { + _, err = io.Copy(rwc, hijackOptions.in) + } + errChanIn <- err + rwc.(interface { + CloseWrite() error + }).CloseWrite() + }() + + var errIn error + select { + case errIn = <-errChanIn: + case <-quit: + } + + var errOut error + select { + case errOut = <-errChanOut: + case <-quit: + } + + if errIn != nil { + errs <- errIn + } else { + errs <- errOut + } + }() + + return struct { + closerFunc + waiterFunc + }{ + closerFunc(func() error { close(quit); return nil }), + waiterFunc(func() error { return <-errs }), + }, nil +} + +func (c *Client) getURL(path string) string { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { + urlStr = "" + } + if c.requestedAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) + } + return fmt.Sprintf("%s%s", urlStr, path) +} + +func (c *Client) getPath(basepath string, opts interface{}) (string, error) { + queryStr, requiredAPIVersion := queryStringVersion(opts) + return c.pathVersionCheck(basepath, queryStr, requiredAPIVersion) +} + +func (c *Client) pathVersionCheck(basepath, queryStr string, requiredAPIVersion APIVersion) (string, error) { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { + urlStr = "" + } + if c.requestedAPIVersion != nil { + if c.requestedAPIVersion.GreaterThanOrEqualTo(requiredAPIVersion) { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, c.requestedAPIVersion, basepath, queryStr), nil + } + return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient", + basepath, requiredAPIVersion, c.requestedAPIVersion) + } + if requiredAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, requiredAPIVersion, basepath, queryStr), nil + } + return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil +} + +// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX +// domain socket to the given path. +func (c *Client) getFakeNativeURL(path string) string { + u := *c.endpointURL // Copy. + + // Override URL so that net/http will not complain. + u.Scheme = "http" + u.Host = "unix.sock" // Doesn't matter what this is - it's not used. 
+	u.Path = ""
+	urlStr := strings.TrimRight(u.String(), "/")
+	if c.requestedAPIVersion != nil {
+		return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+	}
+	return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+func queryStringVersion(opts interface{}) (string, APIVersion) {
+	if opts == nil {
+		return "", nil
+	}
+	value := reflect.ValueOf(opts)
+	if value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	if value.Kind() != reflect.Struct {
+		return "", nil
+	}
+	var apiVersion APIVersion
+	items := url.Values(map[string][]string{})
+	for i := 0; i < value.NumField(); i++ {
+		field := value.Type().Field(i)
+		if field.PkgPath != "" {
+			continue
+		}
+		key := field.Tag.Get("qs")
+		if key == "" {
+			key = strings.ToLower(field.Name)
+		} else if key == "-" {
+			continue
+		}
+		if addQueryStringValue(items, key, value.Field(i)) {
+			verstr := field.Tag.Get("ver")
+			if verstr != "" {
+				ver, _ := NewAPIVersion(verstr)
+				if apiVersion == nil {
+					apiVersion = ver
+				} else if ver.GreaterThan(apiVersion) {
+					apiVersion = ver
+				}
+			}
+		}
+	}
+	return items.Encode(), apiVersion
+}
+
+func queryString(opts interface{}) string {
+	s, _ := queryStringVersion(opts)
+	return s
+}
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			items.Add(key, "1")
+			return true
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if v.Int() > 0 {
+			items.Add(key, strconv.FormatInt(v.Int(), 10))
+			return true
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		if v.Uint() > 0 {
+			items.Add(key, strconv.FormatUint(v.Uint(), 10))
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		if v.Float() > 0 {
+			items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+			return true
+		}
+	case reflect.String:
+		if v.String() != "" {
+			items.Add(key, v.String())
+			return true
+		}
+	case reflect.Ptr:
+		if !v.IsNil() {
+			if b, err := json.Marshal(v.Interface()); err == nil {
+				items.Add(key, string(b))
+				return true
+			}
+		}
+	case reflect.Map:
+		if len(v.MapKeys()) > 0 {
+			if b, err := json.Marshal(v.Interface()); err == nil {
+				items.Add(key, string(b))
+				return true
+			}
+		}
+	case reflect.Array, reflect.Slice:
+		vLen := v.Len()
+		var valuesAdded int
+		if vLen > 0 {
+			for i := 0; i < vLen; i++ {
+				if addQueryStringValue(items, key, v.Index(i)) {
+					valuesAdded++
+				}
+			}
+		}
+		return valuesAdded > 0
+	}
+	return false
+}
+
+// Error represents a failure returned by the API.
+type Error struct { + Status int + Message string +} + +func newError(resp *http.Response) *Error { + type ErrMsg struct { + Message string `json:"message"` + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} + } + var emsg ErrMsg + err = json.Unmarshal(data, &emsg) + if err != nil { + return &Error{Status: resp.StatusCode, Message: string(data)} + } + return &Error{Status: resp.StatusCode, Message: emsg.Message} +} + +func (e *Error) Error() string { + return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) +} + +func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { + if endpoint != "" && !strings.Contains(endpoint, "://") { + endpoint = "tcp://" + endpoint + } + u, err := url.Parse(endpoint) + if err != nil { + return nil, ErrInvalidEndpoint + } + if tls && u.Scheme != "unix" { + u.Scheme = "https" + } + switch u.Scheme { + case unixProtocol, namedPipeProtocol: + return u, nil + case "http", "https", "tcp": + _, port, err := net.SplitHostPort(u.Host) + if err != nil { + var e *net.AddrError + if errors.As(err, &e) { + if e.Err == "missing port in address" { + return u, nil + } + } + return nil, ErrInvalidEndpoint + } + number, err := strconv.ParseInt(port, 10, 64) + if err == nil && number > 0 && number < 65536 { + if u.Scheme == "tcp" { + if tls { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + } + return u, nil + } + return nil, ErrInvalidEndpoint + default: + return nil, ErrInvalidEndpoint + } +} + +type dockerEnv struct { + dockerHost string + dockerTLSVerify bool + dockerCertPath string +} + +func getDockerEnv() (*dockerEnv, error) { + dockerHost := os.Getenv("DOCKER_HOST") + var err error + if dockerHost == "" { + dockerHost = defaultHost + } + dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" + var dockerCertPath string + if dockerTLSVerify { + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + if dockerCertPath == "" { + home := homedir.Get() + if home == "" { + return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") + } + dockerCertPath = filepath.Join(home, ".docker") + dockerCertPath, err = filepath.Abs(dockerCertPath) + if err != nil { + return nil, err + } + } + } + return &dockerEnv{ + dockerHost: dockerHost, + dockerTLSVerify: dockerTLSVerify, + dockerCertPath: dockerCertPath, + }, nil +} + +// defaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func defaultTransport() *http.Transport { + transport := defaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// defaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). 
+func defaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// defaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func defaultClient() *http.Client { + return &http.Client{ + Transport: defaultTransport(), + } +} diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go new file mode 100644 index 00000000000..e9f13f3948d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go @@ -0,0 +1,32 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows +// +build !windows + +package docker + +import ( + "context" + "net" + "net/http" +) + +const defaultHost = "unix:///var/run/docker.sock" + +// initializeNativeClient initializes the native Unix domain socket client on +// Unix-style operating systems +func (c *Client) initializeNativeClient(trFunc func() *http.Transport) { + if c.endpointURL.Scheme != unixProtocol { + return + } + sockPath := c.endpointURL.Path + + tr := trFunc() + tr.Proxy = nil + tr.DialContext = func(_ context.Context, network, addr string) (net.Conn, error) { + return c.Dialer.Dial(unixProtocol, sockPath) + } + c.HTTPClient.Transport = tr +} diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go new file mode 100644 index 00000000000..d35f401a44d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go @@ -0,0 +1,46 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package docker + +import ( + "context" + "net" + "net/http" + "time" + + winio "github.com/Microsoft/go-winio" +) + +const ( + defaultHost = "npipe:////./pipe/docker_engine" + namedPipeConnectTimeout = 2 * time.Second +) + +type pipeDialer struct { + dialFunc func(network, addr string) (net.Conn, error) +} + +func (p pipeDialer) Dial(network, address string) (net.Conn, error) { + return p.dialFunc(network, address) +} + +// initializeNativeClient initializes the native Named Pipe client for Windows +func (c *Client) initializeNativeClient(trFunc func() *http.Transport) { + if c.endpointURL.Scheme != namedPipeProtocol { + return + } + namedPipePath := c.endpointURL.Path + dialFunc := func(_, addr string) (net.Conn, error) { + timeout := namedPipeConnectTimeout + return winio.DialPipe(namedPipePath, &timeout) + } + tr := trFunc() + tr.Proxy = nil + tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return dialFunc(network, addr) + } + c.Dialer = &pipeDialer{dialFunc} + c.HTTPClient.Transport = tr +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go new file mode 100644 index 00000000000..48e550495b8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -0,0 +1,597 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "fmt" + "strconv" + "strings" + "time" + + units "github.com/docker/go-units" +) + +// APIPort is a type that represents a port mapping returned by the Docker API +type APIPort struct { + PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty" toml:"PrivatePort,omitempty"` + PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty" toml:"PublicPort,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` + IP string `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"` +} + +// APIMount represents a mount point for a container. +type APIMount struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Source string `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"` + Destination string `json:"Destination,omitempty" yaml:"Destination,omitempty" toml:"Destination,omitempty"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` + Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` + RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"` + Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` +} + +// APIContainers represents each container in the list returned by +// ListContainers. 
+type APIContainers struct {
+	ID         string            `json:"Id" yaml:"Id" toml:"Id"`
+	Image      string            `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"`
+	Command    string            `json:"Command,omitempty" yaml:"Command,omitempty" toml:"Command,omitempty"`
+	Created    int64             `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+	State      string            `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"`
+	Status     string            `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+	Ports      []APIPort         `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"`
+	SizeRw     int64             `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"`
+	SizeRootFs int64             `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"`
+	Names      []string          `json:"Names,omitempty" yaml:"Names,omitempty" toml:"Names,omitempty"`
+	Labels     map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+	Networks   NetworkList       `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"`
+	Mounts     []APIMount        `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+}
+
+// NetworkList encapsulates a map of networks, as returned by the Docker API in
+// ListContainers.
+type NetworkList struct {
+	Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty" toml:"Networks,omitempty"`
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+	return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+	parts := strings.Split(string(p), "/")
+	if len(parts) == 1 {
+		return "tcp"
+	}
+	return parts[1]
+}
+
+// HealthCheck represents one check of health.
+type HealthCheck struct {
+	Start    time.Time `json:"Start,omitempty" yaml:"Start,omitempty" toml:"Start,omitempty"`
+	End      time.Time `json:"End,omitempty" yaml:"End,omitempty" toml:"End,omitempty"`
+	ExitCode int       `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+	Output   string    `json:"Output,omitempty" yaml:"Output,omitempty" toml:"Output,omitempty"`
+}
+
+// Health represents the health of a container.
+type Health struct {
+	Status        string        `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+	FailingStreak int           `json:"FailingStreak,omitempty" yaml:"FailingStreak,omitempty" toml:"FailingStreak,omitempty"`
+	Log           []HealthCheck `json:"Log,omitempty" yaml:"Log,omitempty" toml:"Log,omitempty"`
+}
+
+// State represents the state of a container.
+type State struct { + Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` + Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"` + Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty" toml:"Paused,omitempty"` + Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty" toml:"Restarting,omitempty"` + OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty" toml:"OOMKilled,omitempty"` + RemovalInProgress bool `json:"RemovalInProgress,omitempty" yaml:"RemovalInProgress,omitempty" toml:"RemovalInProgress,omitempty"` + Dead bool `json:"Dead,omitempty" yaml:"Dead,omitempty" toml:"Dead,omitempty"` + Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty" toml:"Pid,omitempty"` + ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"` + Error string `json:"Error,omitempty" yaml:"Error,omitempty" toml:"Error,omitempty"` + StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty" toml:"StartedAt,omitempty"` + FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty" toml:"FinishedAt,omitempty"` + Health Health `json:"Health,omitempty" yaml:"Health,omitempty" toml:"Health,omitempty"` +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// PortBinding represents the host/container port mapping as returned in the +// `docker inspect` json +type PortBinding struct { + HostIP string `json:"HostIp,omitempty" yaml:"HostIp,omitempty" toml:"HostIp,omitempty"` + HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty" toml:"HostPort,omitempty"` +} + +// PortMapping represents a deprecated field in the `docker inspect` output, +// and its value as found in NetworkSettings should always be nil +type PortMapping map[string]string + +// ContainerNetwork represents the networking settings of a container per network. 
+type ContainerNetwork struct { + Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` +} + +// NetworkSettings contains network-related information about a container +type NetworkSettings struct { + Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty" toml:"Networks,omitempty"` + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` + Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty" toml:"Bridge,omitempty"` + PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty" toml:"PortMapping,omitempty"` + Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` + SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty" toml:"SandboxKey,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` + LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty" toml:"LinkLocalIPv6Address,omitempty"` + LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty" toml:"LinkLocalIPv6PrefixLen,omitempty"` + SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty" toml:"SecondaryIPAddresses,omitempty"` + SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty" toml:"SecondaryIPv6Addresses,omitempty"` +} + +// PortMappingAPI translates the port mappings as contained in NetworkSettings +// into the format in which they would appear when returned by the API +func 
(settings *NetworkSettings) PortMappingAPI() []APIPort { + var mapping []APIPort + for port, bindings := range settings.Ports { + p, _ := parsePort(port.Port()) + if len(bindings) == 0 { + mapping = append(mapping, APIPort{ + PrivatePort: int64(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + p, _ := parsePort(port.Port()) + h, _ := parsePort(binding.HostPort) + mapping = append(mapping, APIPort{ + PrivatePort: int64(p), + PublicPort: int64(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + return mapping +} + +func parsePort(rawPort string) (int, error) { + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// Config is the list of configuration options used when creating a container. +// Config does not contain the options that are specific to starting a container on a +// given host. Those are contained in HostConfig +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty" toml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty" toml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty" toml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty" toml:"ExposedPorts,omitempty"` + PublishService string `json:"PublishService,omitempty" yaml:"PublishService,omitempty" toml:"PublishService,omitempty"` + StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty" toml:"StopSignal,omitempty"` + StopTimeout int `json:"StopTimeout,omitempty" yaml:"StopTimeout,omitempty" toml:"StopTimeout,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd" toml:"Cmd"` + Shell []string `json:"Shell,omitempty" yaml:"Shell,omitempty" toml:"Shell,omitempty"` + Healthcheck *HealthConfig `json:"Healthcheck,omitempty" yaml:"Healthcheck,omitempty" toml:"Healthcheck,omitempty"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint" toml:"Entrypoint"` + SecurityOpts []string `json:"SecurityOpts,omitempty" 
yaml:"SecurityOpts,omitempty" toml:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty" toml:"OnBuild,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"` + ArgsEscaped bool `json:"ArgsEscaped,omitempty" yaml:"ArgsEscaped,omitempty" toml:"ArgsEscaped,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty" toml:"StdinOnce,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty" toml:"NetworkDisabled,omitempty"` + + // This is no longer used and has been kept here for backward + // compatibility, please use HostConfig.VolumesFrom. + VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"` +} + +// HostMount represents a mount point in the container in HostConfig. +// +// It has been added in the version 1.25 of the Docker API +type HostMount struct { + Target string `json:"Target,omitempty" yaml:"Target,omitempty" toml:"Target,omitempty"` + Source string `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` + ReadOnly bool `json:"ReadOnly,omitempty" yaml:"ReadOnly,omitempty" toml:"ReadOnly,omitempty"` + BindOptions *BindOptions `json:"BindOptions,omitempty" yaml:"BindOptions,omitempty" toml:"BindOptions,omitempty"` + VolumeOptions *VolumeOptions `json:"VolumeOptions,omitempty" yaml:"VolumeOptions,omitempty" toml:"VolumeOptions,omitempty"` + TempfsOptions *TempfsOptions `json:"TmpfsOptions,omitempty" yaml:"TmpfsOptions,omitempty" toml:"TmpfsOptions,omitempty"` +} + +// BindOptions contains optional configuration for the bind type +type BindOptions struct { + Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"` +} + +// VolumeOptions contains optional configuration for the volume type +type VolumeOptions struct { + NoCopy bool `json:"NoCopy,omitempty" yaml:"NoCopy,omitempty" toml:"NoCopy,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` + DriverConfig VolumeDriverConfig `json:"DriverConfig,omitempty" yaml:"DriverConfig,omitempty" toml:"DriverConfig,omitempty"` +} + +// TempfsOptions contains optional configuration for the tempfs type +type TempfsOptions struct { + SizeBytes int64 `json:"SizeBytes,omitempty" yaml:"SizeBytes,omitempty" toml:"SizeBytes,omitempty"` + Mode int `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` +} + +// VolumeDriverConfig holds a map of volume driver specific options +type VolumeDriverConfig struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"` +} + +// Mount represents a mount point 
in the container.
+//
+// It has been added in version 1.20 of the Docker API, available since
+// Docker 1.8.
+type Mount struct {
+	Name        string
+	Source      string
+	Destination string
+	Driver      string
+	Mode        string
+	RW          bool
+}
+
+// LogConfig defines the log driver type and the configuration for it.
+type LogConfig struct {
+	Type   string            `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+	Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+}
+
+// ULimit defines system-wide resource limitations. This can help a lot in
+// system administration, e.g. when a user starts too many processes and
+// therefore makes the system unresponsive for other users.
+type ULimit struct {
+	Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Soft int64  `json:"Soft,omitempty" yaml:"Soft,omitempty" toml:"Soft,omitempty"`
+	Hard int64  `json:"Hard,omitempty" yaml:"Hard,omitempty" toml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about which Swarm node the container is on.
+type SwarmNode struct {
+	ID     string            `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
+	IP     string            `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"`
+	Addr   string            `json:"Addr,omitempty" yaml:"Addr,omitempty" toml:"Addr,omitempty"`
+	Name   string            `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	CPUs   int64             `json:"CPUs,omitempty" yaml:"CPUs,omitempty" toml:"CPUs,omitempty"`
+	Memory int64             `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"`
+	Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+}
+
+// GraphDriver contains information about the GraphDriver used by the
+// container.
+type GraphDriver struct {
+	Name string            `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Data map[string]string `json:"Data,omitempty" yaml:"Data,omitempty" toml:"Data,omitempty"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+//
+// It has been added in version 1.24 of the Docker API, available since
+// Docker 1.12.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:"Test,omitempty" yaml:"Test,omitempty" toml:"Test,omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:"Interval,omitempty" yaml:"Interval,omitempty" toml:"Interval,omitempty"`          // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:"Timeout,omitempty" yaml:"Timeout,omitempty" toml:"Timeout,omitempty"`             // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:"StartPeriod,omitempty" yaml:"StartPeriod,omitempty" toml:"StartPeriod,omitempty"` // The start period for the container to initialize before the retries starts to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:"Retries,omitempty" yaml:"Retries,omitempty" toml:"Retries,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its config,
+// hostconfig, etc.
+type Container struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + + Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` + + Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"` + Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"` + + Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` + State State `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"` + Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"` + + Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty" toml:"Node,omitempty"` + + NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"` + + SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty" toml:"SysInitPath,omitempty"` + ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty" toml:"ResolvConfPath,omitempty"` + HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty" toml:"HostnamePath,omitempty"` + HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty" toml:"HostsPath,omitempty"` + LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty" toml:"LogPath,omitempty"` + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + + Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"` + VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty" toml:"VolumesRW,omitempty"` + HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"` + ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty" toml:"ExecIDs,omitempty"` + GraphDriver *GraphDriver `json:"GraphDriver,omitempty" yaml:"GraphDriver,omitempty" toml:"GraphDriver,omitempty"` + + RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"` + + AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"` + + MountLabel string `json:"MountLabel,omitempty" yaml:"MountLabel,omitempty" toml:"MountLabel,omitempty"` + ProcessLabel string `json:"ProcessLabel,omitempty" yaml:"ProcessLabel,omitempty" toml:"ProcessLabel,omitempty"` + Platform string `json:"Platform,omitempty" yaml:"Platform,omitempty" toml:"Platform,omitempty"` + SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"` + SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"` +} + +// KeyValuePair is a type for generic key/value pairs as used in the Lxc +// configuration +type KeyValuePair struct { + Key string `json:"Key,omitempty" yaml:"Key,omitempty" toml:"Key,omitempty"` + Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// Device represents a device mapping between the Docker host and the +// container. 
+type Device struct { + PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty" toml:"PathOnHost,omitempty"` + PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty" toml:"PathInContainer,omitempty"` + CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty" toml:"CgroupPermissions,omitempty"` +} + +// DeviceRequest represents a request for device that's sent to device drivers. +type DeviceRequest struct { + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` + Count int `json:"Count,omitempty" yaml:"Count,omitempty" toml:"Count,omitempty"` + DeviceIDs []string `json:"DeviceIDs,omitempty" yaml:"DeviceIDs,omitempty" toml:"DeviceIDs,omitempty"` + Capabilities [][]string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` + Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"` +} + +// BlockWeight represents a relative device weight for an individual device inside +// of a container +type BlockWeight struct { + Path string `json:"Path,omitempty"` + Weight string `json:"Weight,omitempty"` +} + +// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device +// inside of a container +type BlockLimit struct { + Path string `json:"Path,omitempty"` + Rate int64 `json:"Rate,omitempty"` +} + +// HostConfig contains the container options related to starting a container on +// a given host +type HostConfig struct { + Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"` + CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"` + CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"` + Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` // Mutually exclusive w.r.t. 
CapAdd and CapDrop API v1.40 + GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"` + ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"` + LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"` + PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty" toml:"PortBindings,omitempty"` + Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.10 and above only + DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty" toml:"DnsOptions,omitempty"` + DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty" toml:"DnsSearch,omitempty"` + ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty" toml:"ExtraHosts,omitempty"` + VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"` + UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"` + NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"` + IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"` + Isolation string `json:"Isolation,omitempty" yaml:"Isolation,omitempty" toml:"Isolation,omitempty"` // Windows only + ConsoleSize [2]int `json:"ConsoleSize,omitempty" yaml:"ConsoleSize,omitempty" toml:"ConsoleSize,omitempty"` // Windows only height x width + PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"` + UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"` + RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"` + Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"` + DeviceCgroupRules []string `json:"DeviceCgroupRules,omitempty" yaml:"DeviceCgroupRules,omitempty" toml:"DeviceCgroupRules,omitempty"` + DeviceRequests []DeviceRequest `json:"DeviceRequests,omitempty" yaml:"DeviceRequests,omitempty" toml:"DeviceRequests,omitempty"` + LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"` + SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"` + CgroupnsMode string `json:"CgroupnsMode,omitempty" yaml:"CgroupnsMode,omitempty" toml:"CgroupnsMode,omitempty"` // v1.40+ + Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"` + CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"` + 
CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty" toml:"CpusetCpus,omitempty"` + CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty" toml:"CpusetMems,omitempty"` + CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty" toml:"CpuQuota,omitempty"` + CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty" toml:"CpuPeriod,omitempty"` + CPURealtimePeriod int64 `json:"CpuRealtimePeriod,omitempty" yaml:"CpuRealtimePeriod,omitempty" toml:"CpuRealtimePeriod,omitempty"` + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime,omitempty" yaml:"CpuRealtimeRuntime,omitempty" toml:"CpuRealtimeRuntime,omitempty"` + NanoCPUs int64 `json:"NanoCpus,omitempty" yaml:"NanoCpus,omitempty" toml:"NanoCpus,omitempty"` + BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight,omitempty" toml:"BlkioWeight,omitempty"` + BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice,omitempty" toml:"BlkioWeightDevice,omitempty"` + BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps,omitempty" toml:"BlkioDeviceReadBps,omitempty"` + BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps,omitempty" toml:"BlkioDeviceReadIOps,omitempty"` + BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps,omitempty" toml:"BlkioDeviceWriteBps,omitempty"` + BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps,omitempty" toml:"BlkioDeviceWriteIOps,omitempty"` + Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty" toml:"Ulimits,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"` + OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty" toml:"OomScoreAdj,omitempty"` + MemorySwappiness *int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty" toml:"MemorySwappiness,omitempty"` + PidsLimit *int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty" toml:"PidsLimit,omitempty"` + OOMKillDisable *bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty" toml:"OomKillDisable,omitempty"` + ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty" toml:"ShmSize,omitempty"` + Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty" toml:"Tmpfs,omitempty"` + StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty" toml:"StorageOpt,omitempty"` + Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty" toml:"Sysctls,omitempty"` + CPUCount int64 `json:"CpuCount,omitempty" yaml:"CpuCount,omitempty"` + CPUPercent int64 `json:"CpuPercent,omitempty" yaml:"CpuPercent,omitempty"` + IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"` + IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"` + Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + MaskedPaths []string `json:"MaskedPaths,omitempty" yaml:"MaskedPaths,omitempty" toml:"MaskedPaths,omitempty"` + ReadonlyPaths []string `json:"ReadonlyPaths,omitempty" yaml:"ReadonlyPaths,omitempty" toml:"ReadonlyPaths,omitempty"` + Runtime string `json:"Runtime,omitempty" yaml:"Runtime,omitempty" toml:"Runtime,omitempty"` + Init bool `json:",omitempty" yaml:",omitempty"` + Privileged bool `json:"Privileged,omitempty" 
yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` + PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty" toml:"PublishAllPorts,omitempty"` + ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" toml:"ReadonlyRootfs,omitempty"` + AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"` +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointConfig `json:"EndpointsConfig" yaml:"EndpointsConfig" toml:"EndpointsConfig"` // Endpoint configs for each connecting network +} + +// NoSuchContainer is the error returned when a given container does not exist. +type NoSuchContainer struct { + ID string + Err error +} + +func (err *NoSuchContainer) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such container: " + err.ID +} + +// ContainerAlreadyRunning is the error returned when a given container is +// already running. +type ContainerAlreadyRunning struct { + ID string +} + +func (err *ContainerAlreadyRunning) Error() string { + return "Container already running: " + err.ID +} + +// ContainerNotRunning is the error returned when a given container is not +// running. +type ContainerNotRunning struct { + ID string +} + +func (err *ContainerNotRunning) Error() string { + return "Container not running: " + err.ID +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_archive.go b/vendor/github.com/fsouza/go-dockerclient/container_archive.go new file mode 100644 index 00000000000..6c7e61c793b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_archive.go @@ -0,0 +1,58 @@ +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "time" +) + +// UploadToContainerOptions is the set of options that can be used when +// uploading an archive into a container. +// +// See https://goo.gl/g25o7u for more details. +type UploadToContainerOptions struct { + InputStream io.Reader `json:"-" qs:"-"` + Path string `qs:"path"` + NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"` + Context context.Context +} + +// UploadToContainer uploads a tar archive to be extracted to a path in the +// filesystem of the container. +// +// See https://goo.gl/g25o7u for more details. +func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { + url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) + + return c.stream(http.MethodPut, url, streamOptions{ + in: opts.InputStream, + context: opts.Context, + }) +} + +// DownloadFromContainerOptions is the set of options that can be used when +// downloading resources from a container. +// +// See https://goo.gl/W49jxK for more details. +type DownloadFromContainerOptions struct { + OutputStream io.Writer `json:"-" qs:"-"` + Path string `qs:"path"` + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// DownloadFromContainer downloads a tar archive of files or folders in a container. +// +// See https://goo.gl/W49jxK for more details. 
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { + url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) + + return c.stream(http.MethodGet, url, streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_attach.go b/vendor/github.com/fsouza/go-dockerclient/container_attach.go new file mode 100644 index 00000000000..e6742d007ed --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_attach.go @@ -0,0 +1,74 @@ +package docker + +import ( + "io" + "net/http" +) + +// AttachToContainerOptions is the set of options that can be used when +// attaching to a container. +// +// See https://goo.gl/JF10Zk for more details. +type AttachToContainerOptions struct { + Container string `qs:"-"` + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} + + // Override the key sequence for detaching a container. + DetachKeys string + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` + + // Get container logs, sending it to OutputStream. + Logs bool + + // Stream the response? + Stream bool + + // Attach to stdin, and use InputStream. + Stdin bool + + // Attach to stdout, and use OutputStream. + Stdout bool + + // Attach to stderr, and use ErrorStream. + Stderr bool +} + +// AttachToContainer attaches to a container, using the given options. +// +// See https://goo.gl/JF10Zk for more details. +func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { + cw, err := c.AttachToContainerNonBlocking(opts) + if err != nil { + return err + } + return cw.Wait() +} + +// AttachToContainerNonBlocking attaches to a container, using the given options. +// This function does not block. +// +// See https://goo.gl/NKpkFk for more details. +func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) { + if opts.Container == "" { + return nil, &NoSuchContainer{ID: opts.Container} + } + path := "/containers/" + opts.Container + "/attach?" + queryString(opts) + return c.hijack(http.MethodPost, path, hijackOptions{ + success: opts.Success, + setRawTerminal: opts.RawTerminal, + in: opts.InputStream, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_changes.go b/vendor/github.com/fsouza/go-dockerclient/container_changes.go new file mode 100644 index 00000000000..48835e23152 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_changes.go @@ -0,0 +1,28 @@ +package docker + +import ( + "encoding/json" + "errors" + "net/http" +) + +// ContainerChanges returns changes in the filesystem of the given container. +// +// See https://goo.gl/15KKzh for more details. 
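+//
+// A short sketch of listing the changes (assumes a configured *Client named
+// client; the container ID is a placeholder):
+//
+//    changes, err := client.ContainerChanges("some-container-id")
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, change := range changes {
+//        fmt.Printf("%+v\n", change)
+//    }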
+func (c *Client) ContainerChanges(id string) ([]Change, error) { + path := "/containers/" + id + "/changes" + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var changes []Change + if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil { + return nil, err + } + return changes, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_commit.go b/vendor/github.com/fsouza/go-dockerclient/container_commit.go new file mode 100644 index 00000000000..902ba645538 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_commit.go @@ -0,0 +1,46 @@ +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" +) + +// CommitContainerOptions aggregates parameters to the CommitContainer method. +// +// See https://goo.gl/CzIguf for more details. +type CommitContainerOptions struct { + Container string + Repository string `qs:"repo"` + Tag string + Message string `qs:"comment"` + Author string + Changes []string `qs:"changes"` + Run *Config `qs:"-"` + Context context.Context +} + +// CommitContainer creates a new image from a container's changes. +// +// See https://goo.gl/CzIguf for more details. +func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { + path := "/commit?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{ + data: opts.Run, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + return nil, err + } + defer resp.Body.Close() + var image Image + if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { + return nil, err + } + return &image, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_copy.go b/vendor/github.com/fsouza/go-dockerclient/container_copy.go new file mode 100644 index 00000000000..c8ffb85c305 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_copy.go @@ -0,0 +1,50 @@ +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" +) + +// CopyFromContainerOptions contains the set of options used for copying +// files from a container. +// +// Deprecated: Use DownloadFromContainerOptions and DownloadFromContainer instead. +type CopyFromContainerOptions struct { + OutputStream io.Writer `json:"-"` + Container string `json:"-"` + Resource string + Context context.Context `json:"-"` +} + +// CopyFromContainer copies files from a container. +// +// Deprecated: Use DownloadFromContainer and DownloadFromContainerOptions instead.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) { + return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead") + } + url := fmt.Sprintf("/containers/%s/copy", opts.Container) + resp, err := c.do(http.MethodPost, url, doOptions{ + data: opts, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.Container} + } + return err + } + defer resp.Body.Close() + _, err = io.Copy(opts.OutputStream, resp.Body) + return err +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_create.go b/vendor/github.com/fsouza/go-dockerclient/container_create.go new file mode 100644 index 00000000000..5a5ffe0f032 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_create.go @@ -0,0 +1,79 @@ +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "strings" +) + +// ErrContainerAlreadyExists is the error returned by CreateContainer when the +// container already exists. +var ErrContainerAlreadyExists = errors.New("container already exists") + +// CreateContainerOptions specify parameters to the CreateContainer function. +// +// See https://goo.gl/tyzwVM for more details. +type CreateContainerOptions struct { + Name string + Config *Config `qs:"-"` + HostConfig *HostConfig `qs:"-"` + NetworkingConfig *NetworkingConfig `qs:"-"` + Context context.Context +} + +// CreateContainer creates a new container, returning the container instance, +// or an error in case of failure. +// +// The returned container instance contains only the container ID. To get more +// details about the container after creating it, use InspectContainer. +// +// See https://goo.gl/tyzwVM for more details. +func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { + path := "/containers/create?" + queryString(opts) + resp, err := c.do( + http.MethodPost, + path, + doOptions{ + data: struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"` + NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty" toml:"NetworkingConfig,omitempty"` + }{ + opts.Config, + opts.HostConfig, + opts.NetworkingConfig, + }, + context: opts.Context, + }, + ) + + var e *Error + if errors.As(err, &e) { + if e.Status == http.StatusNotFound && strings.Contains(e.Message, "No such image") { + return nil, ErrNoSuchImage + } + if e.Status == http.StatusConflict { + return nil, ErrContainerAlreadyExists + } + // Workaround for 17.09 bug returning 400 instead of 409. 
+ // See https://github.com/moby/moby/issues/35021 + if e.Status == http.StatusBadRequest && strings.Contains(e.Message, "Conflict.") { + return nil, ErrContainerAlreadyExists + } + } + + if err != nil { + return nil, err + } + defer resp.Body.Close() + var container Container + if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { + return nil, err + } + + container.Name = opts.Name + + return &container, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_export.go b/vendor/github.com/fsouza/go-dockerclient/container_export.go new file mode 100644 index 00000000000..312f8cf1053 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_export.go @@ -0,0 +1,37 @@ +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "time" +) + +// ExportContainerOptions is the set of parameters to the ExportContainer +// method. +// +// See https://goo.gl/yGJCIh for more details. +type ExportContainerOptions struct { + ID string + OutputStream io.Writer + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// ExportContainer exports the contents of the given container as a tar archive, +// writing it to the OutputStream in the given options. +// +// See https://goo.gl/yGJCIh for more details. +func (c *Client) ExportContainer(opts ExportContainerOptions) error { + if opts.ID == "" { + return &NoSuchContainer{ID: opts.ID} + } + url := fmt.Sprintf("/containers/%s/export", opts.ID) + return c.stream(http.MethodGet, url, streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_inspect.go b/vendor/github.com/fsouza/go-dockerclient/container_inspect.go new file mode 100644 index 00000000000..48c1e8ea7b9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_inspect.go @@ -0,0 +1,55 @@ +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" +) + +// InspectContainer returns information about a container by its ID. +// +// Deprecated: Use InspectContainerWithOptions instead. +func (c *Client) InspectContainer(id string) (*Container, error) { + return c.InspectContainerWithOptions(InspectContainerOptions{ID: id}) +} + +// InspectContainerWithContext returns information about a container by its ID. +// The context object can be used to cancel the inspect request. +// +// Deprecated: Use InspectContainerWithOptions instead. +func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) { + return c.InspectContainerWithOptions(InspectContainerOptions{ID: id, Context: ctx}) +} + +// InspectContainerWithOptions returns information about a container by its ID. +// +// See https://goo.gl/FaI5JT for more details. +func (c *Client) InspectContainerWithOptions(opts InspectContainerOptions) (*Container, error) { + path := "/containers/" + opts.ID + "/json?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{ + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.ID} + } + return nil, err + } + defer resp.Body.Close() + var container Container + if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { + return nil, err + } + return &container, nil +} + +// InspectContainerOptions specifies parameters for InspectContainerWithOptions.
+// +// See https://goo.gl/FaI5JT for more details. +type InspectContainerOptions struct { + Context context.Context + ID string `qs:"-"` + Size bool +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_kill.go b/vendor/github.com/fsouza/go-dockerclient/container_kill.go new file mode 100644 index 00000000000..600c58f1292 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_kill.go @@ -0,0 +1,46 @@ +package docker + +import ( + "context" + "errors" + "net/http" +) + +// KillContainerOptions represents the set of options that can be used in a +// call to KillContainer. +// +// See https://goo.gl/JnTxXZ for more details. +type KillContainerOptions struct { + // The ID of the container. + ID string `qs:"-"` + + // The signal to send to the container. When omitted, Docker server + // will assume SIGKILL. + Signal Signal + Context context.Context +} + +// KillContainer sends a signal to a container, returning an error in case of +// failure. +// +// See https://goo.gl/JnTxXZ for more details. +func (c *Client) KillContainer(opts KillContainerOptions) error { + path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if !errors.As(err, &e) { + return err + } + switch e.Status { + case http.StatusNotFound: + return &NoSuchContainer{ID: opts.ID} + case http.StatusConflict: + return &ContainerNotRunning{ID: opts.ID} + default: + return err + } + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_list.go b/vendor/github.com/fsouza/go-dockerclient/container_list.go new file mode 100644 index 00000000000..1dec0e915a0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_list.go @@ -0,0 +1,37 @@ +package docker + +import ( + "context" + "encoding/json" + "net/http" +) + +// ListContainersOptions specify parameters to the ListContainers function. +// +// See https://goo.gl/kaOHGw for more details. +type ListContainersOptions struct { + All bool + Size bool + Limit int + Since string + Before string + Filters map[string][]string + Context context.Context +} + +// ListContainers returns a slice of containers matching the given criteria. +// +// See https://goo.gl/kaOHGw for more details. +func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { + path := "/containers/json?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var containers []APIContainers + if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil { + return nil, err + } + return containers, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_logs.go b/vendor/github.com/fsouza/go-dockerclient/container_logs.go new file mode 100644 index 00000000000..0e3f1199cc5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_logs.go @@ -0,0 +1,58 @@ +package docker + +import ( + "context" + "io" + "net/http" + "time" +) + +// LogsOptions represents the set of options used when getting logs from a +// container. +// +// See https://goo.gl/krK0ZH for more details. 
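+//
+// A minimal sketch of collecting a container's logs into in-memory buffers
+// (assumes a configured *Client named client; the container ID is a
+// placeholder):
+//
+//    var stdout, stderr bytes.Buffer
+//    err := client.Logs(LogsOptions{
+//        Container:    "some-container-id",
+//        OutputStream: &stdout,
+//        ErrorStream:  &stderr,
+//        Stdout:       true,
+//        Stderr:       true,
+//        Tail:         "100",
+//    })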
+type LogsOptions struct { + Context context.Context + Container string `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + Tail string + + Since int64 + Follow bool + Stdout bool + Stderr bool + Timestamps bool + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` +} + +// Logs gets stdout and stderr logs from the specified container. +// +// When LogsOptions.RawTerminal is set to false, go-dockerclient will multiplex +// the streams and send the container's stdout to LogsOptions.OutputStream, and +// stderr to LogsOptions.ErrorStream. +// +// When LogsOptions.RawTerminal is true, callers will get the raw stream on +// LogsOptions.OutputStream. The caller can use libraries such as dlog +// (github.com/ahmetalpbalkan/dlog). +// +// See https://goo.gl/krK0ZH for more details. +func (c *Client) Logs(opts LogsOptions) error { + if opts.Container == "" { + return &NoSuchContainer{ID: opts.Container} + } + if opts.Tail == "" { + opts.Tail = "all" + } + path := "/containers/" + opts.Container + "/logs?" + queryString(opts) + return c.stream(http.MethodGet, path, streamOptions{ + setRawTerminal: opts.RawTerminal, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_pause.go b/vendor/github.com/fsouza/go-dockerclient/container_pause.go new file mode 100644 index 00000000000..7d18b32f9b4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_pause.go @@ -0,0 +1,24 @@ +package docker + +import ( + "errors" + "fmt" + "net/http" +) + +// PauseContainer pauses the given container. +// +// See https://goo.gl/D1Yaii for more details. +func (c *Client) PauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/pause", id) + resp, err := c.do(http.MethodPost, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_prune.go b/vendor/github.com/fsouza/go-dockerclient/container_prune.go new file mode 100644 index 00000000000..3f2bdc6a29a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_prune.go @@ -0,0 +1,40 @@ +package docker + +import ( + "context" + "encoding/json" + "net/http" +) + +// PruneContainersOptions specify parameters to the PruneContainers function. +// +// See https://goo.gl/wnkgDT for more details. +type PruneContainersOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneContainersResults specify results from the PruneContainers function. +// +// See https://goo.gl/wnkgDT for more details. +type PruneContainersResults struct { + ContainersDeleted []string + SpaceReclaimed int64 +} + +// PruneContainers deletes containers which are stopped. +// +// See https://goo.gl/wnkgDT for more details. +func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) { + path := "/containers/prune?"
+ queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var results PruneContainersResults + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return nil, err + } + return &results, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_remove.go b/vendor/github.com/fsouza/go-dockerclient/container_remove.go new file mode 100644 index 00000000000..dbe0907f0b5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_remove.go @@ -0,0 +1,41 @@ +package docker + +import ( + "context" + "errors" + "net/http" +) + +// RemoveContainerOptions encapsulates options to remove a container. +// +// See https://goo.gl/hL5IPC for more details. +type RemoveContainerOptions struct { + // The ID of the container. + ID string `qs:"-"` + + // A flag that indicates whether Docker should remove the volumes + // associated to the container. + RemoveVolumes bool `qs:"v"` + + // A flag that indicates whether Docker should remove the container + // even if it is currently running. + Force bool + Context context.Context +} + +// RemoveContainer removes a container, returning an error in case of failure. +// +// See https://goo.gl/hL5IPC for more details. +func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { + path := "/containers/" + opts.ID + "?" + queryString(opts) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: opts.ID} + } + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_rename.go b/vendor/github.com/fsouza/go-dockerclient/container_rename.go new file mode 100644 index 00000000000..893c423bdf3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_rename.go @@ -0,0 +1,33 @@ +package docker + +import ( + "context" + "fmt" + "net/http" +) + +// RenameContainerOptions specify parameters to the RenameContainer function. +// +// See https://goo.gl/46inai for more details. +type RenameContainerOptions struct { + // ID of container to rename + ID string `qs:"-"` + + // New name + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Context context.Context +} + +// RenameContainer updates an existing container's name. +// +// See https://goo.gl/46inai for more details. +func (c *Client) RenameContainer(opts RenameContainerOptions) error { + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{ + context: opts.Context, + }) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_resize.go b/vendor/github.com/fsouza/go-dockerclient/container_resize.go new file mode 100644 index 00000000000..3445be6b577 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_resize.go @@ -0,0 +1,22 @@ +package docker + +import ( + "net/http" + "net/url" + "strconv" +) + +// ResizeContainerTTY resizes the terminal to the given height and width. +// +// See https://goo.gl/FImjeq for more details.
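+//
+// For example (assumes a configured *Client named client; the dimensions are
+// illustrative):
+//
+//    err := client.ResizeContainerTTY("some-container-id", 40, 120)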
+func (c *Client) ResizeContainerTTY(id string, height, width int) error { + params := make(url.Values) + params.Set("h", strconv.Itoa(height)) + params.Set("w", strconv.Itoa(width)) + resp, err := c.do(http.MethodPost, "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_restart.go b/vendor/github.com/fsouza/go-dockerclient/container_restart.go new file mode 100644 index 00000000000..183cbac0ffb --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_restart.go @@ -0,0 +1,64 @@ +package docker + +import ( + "errors" + "fmt" + "net/http" +) + +// RestartPolicy represents the policy for automatically restarting a container. +// +// Possible values are: +// +// - always: the docker daemon will always restart the container +// - on-failure: the docker daemon will restart the container on failures, at +// most MaximumRetryCount times +// - unless-stopped: the docker daemon will always restart the container except +// when the user has manually stopped the container +// - no: the docker daemon will not restart the container automatically +type RestartPolicy struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" toml:"MaximumRetryCount,omitempty"` +} + +// AlwaysRestart returns a restart policy that tells the Docker daemon to +// always restart the container. +func AlwaysRestart() RestartPolicy { + return RestartPolicy{Name: "always"} +} + +// RestartOnFailure returns a restart policy that tells the Docker daemon to +// restart the container on failures, trying at most maxRetry times. +func RestartOnFailure(maxRetry int) RestartPolicy { + return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} +} + +// RestartUnlessStopped returns a restart policy that tells the Docker daemon to +// always restart the container except when the user has manually stopped the container. +func RestartUnlessStopped() RestartPolicy { + return RestartPolicy{Name: "unless-stopped"} +} + +// NeverRestart returns a restart policy that tells the Docker daemon to never +// restart the container on failures. +func NeverRestart() RestartPolicy { + return RestartPolicy{Name: "no"} +} + +// RestartContainer stops a container, killing it after the given timeout (in +// seconds) during the stop process, and then starts it again. +// +// See https://goo.gl/MrAKQ5 for more details. +func (c *Client) RestartContainer(id string, timeout uint) error { + path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) + resp, err := c.do(http.MethodPost, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_start.go b/vendor/github.com/fsouza/go-dockerclient/container_start.go new file mode 100644 index 00000000000..0911eaab415 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_start.go @@ -0,0 +1,57 @@ +package docker + +import ( + "context" + "errors" + "net/http" +) + +// StartContainer starts a container, returning an error in case of failure. +// +// Passing the HostConfig to this method has been deprecated in Docker API 1.22 +// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine +// 1.12.x).
The client will ignore the parameter when communicating with Docker +// API 1.24 or greater. +// +// See https://goo.gl/fbOSZy for more details. +func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { + return c.startContainer(id, hostConfig, doOptions{}) +} + +// StartContainerWithContext starts a container, returning an error in case of +// failure. The context can be used to cancel the outstanding start container +// request. +// +// Passing the HostConfig to this method has been deprecated in Docker API 1.22 +// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine +// 1.12.x). The client will ignore the parameter when communicating with Docker +// API 1.24 or greater. +// +// See https://goo.gl/fbOSZy for more details. +func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error { + return c.startContainer(id, hostConfig, doOptions{context: ctx}) +} + +func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error { + path := "/containers/" + id + "/start" + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) { + opts.data = hostConfig + opts.forceJSON = true + } + resp, err := c.do(http.MethodPost, path, opts) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id, Err: err} + } + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return &ContainerAlreadyRunning{ID: id} + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_stats.go b/vendor/github.com/fsouza/go-dockerclient/container_stats.go new file mode 100644 index 00000000000..ee2499a520a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_stats.go @@ -0,0 +1,215 @@ +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" +) + +// Stats represents container statistics, returned by /containers/<id>/stats. +// +// See https://goo.gl/Dk3Xio for more details.
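+//
+// A minimal sketch of consuming a stats stream (assumes a configured *Client
+// named client; the container ID is a placeholder). Stats closes the channel
+// when it finishes, so ranging over it terminates:
+//
+//    statsC := make(chan *Stats)
+//    done := make(chan bool)
+//    go func() {
+//        _ = client.Stats(StatsOptions{
+//            ID:     "some-container-id",
+//            Stats:  statsC,
+//            Stream: true,
+//            Done:   done,
+//        })
+//    }()
+//    for s := range statsC {
+//        fmt.Println(s.MemoryStats.Usage)
+//    }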
+type Stats struct { + Read time.Time `json:"read,omitempty" yaml:"read,omitempty" toml:"read,omitempty"` + PreRead time.Time `json:"preread,omitempty" yaml:"preread,omitempty" toml:"preread,omitempty"` + NumProcs uint32 `json:"num_procs" yaml:"num_procs" toml:"num_procs"` + PidsStats struct { + Current uint64 `json:"current,omitempty" yaml:"current,omitempty"` + } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty" toml:"pids_stats,omitempty"` + Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty" toml:"network,omitempty"` + Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty" toml:"networks,omitempty"` + MemoryStats struct { + Stats struct { + TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"` + Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"` + MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"` + TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"` + Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"` + Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"` + TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"` + Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"` + Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"` + Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"` + TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"` + Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"` + TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"` + TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"` + TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"` + TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"` + RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"` + HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"` + TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"` + TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"` + ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"` + TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"` + TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"` + TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"` + InactiveAnon uint64 `json:"inactive_anon,omitempty" 
yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"` + ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"` + Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"` + InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"` + TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"` + HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"` + Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"` + } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"` + MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"` + Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"` + Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"` + Commit uint64 `json:"commitbytes,omitempty" yaml:"commitbytes,omitempty" toml:"privateworkingset,omitempty"` + CommitPeak uint64 `json:"commitpeakbytes,omitempty" yaml:"commitpeakbytes,omitempty" toml:"commitpeakbytes,omitempty"` + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty" yaml:"privateworkingset,omitempty" toml:"privateworkingset,omitempty"` + } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" toml:"memory_stats,omitempty"` + BlkioStats struct { + IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" toml:"io_service_bytes_recursive,omitempty"` + IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" toml:"io_serviced_recursive,omitempty"` + IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" toml:"io_queue_recursive,omitempty"` + IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" toml:"io_service_time_recursive,omitempty"` + IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" toml:"io_wait_time_recursive,omitempty"` + IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" toml:"io_merged_recursive,omitempty"` + IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" toml:"io_time_recursive,omitempty"` + SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" toml:"sectors_recursive,omitempty"` + } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" toml:"blkio_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" toml:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` + StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty" yaml:"read_count_normalized,omitempty" toml:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty" yaml:"read_size_bytes,omitempty" toml:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty" 
yaml:"write_count_normalized,omitempty" toml:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty" yaml:"write_size_bytes,omitempty" toml:"write_size_bytes,omitempty"` + } `json:"storage_stats,omitempty" yaml:"storage_stats,omitempty" toml:"storage_stats,omitempty"` +} + +// NetworkStats is a stats entry for network stats +type NetworkStats struct { + RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" toml:"rx_dropped,omitempty"` + RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" toml:"rx_bytes,omitempty"` + RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" toml:"rx_errors,omitempty"` + TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" toml:"tx_packets,omitempty"` + TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" toml:"tx_dropped,omitempty"` + RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" toml:"rx_packets,omitempty"` + TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" toml:"tx_errors,omitempty"` + TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" toml:"tx_bytes,omitempty"` +} + +// CPUStats is a stats entry for cpu stats +type CPUStats struct { + CPUUsage struct { + PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" toml:"percpu_usage,omitempty"` + UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" toml:"usage_in_usermode,omitempty"` + TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty" toml:"total_usage,omitempty"` + UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" toml:"usage_in_kernelmode,omitempty"` + } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" toml:"cpu_usage,omitempty"` + SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" toml:"system_cpu_usage,omitempty"` + OnlineCPUs uint64 `json:"online_cpus,omitempty" yaml:"online_cpus,omitempty" toml:"online_cpus,omitempty"` + ThrottlingData struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + ThrottledTime uint64 `json:"throttled_time,omitempty"` + } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty" toml:"throttling_data,omitempty"` +} + +// BlkioStatsEntry is a stats entry for blkio_stats +type BlkioStatsEntry struct { + Major uint64 `json:"major,omitempty" yaml:"major,omitempty" toml:"major,omitempty"` + Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty" toml:"minor,omitempty"` + Op string `json:"op,omitempty" yaml:"op,omitempty" toml:"op,omitempty"` + Value uint64 `json:"value,omitempty" yaml:"value,omitempty" toml:"value,omitempty"` +} + +// StatsOptions specify parameters to the Stats function. +// +// See https://goo.gl/Dk3Xio for more details. +type StatsOptions struct { + ID string + Stats chan<- *Stats + Stream bool + // A flag that enables stopping the stats operation + Done <-chan bool + // Initial connection timeout + Timeout time.Duration + // Timeout with no data is received, it's reset every time new data + // arrives + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// Stats sends container statistics for the given container to the given channel. +// +// This function is blocking, similar to a streaming call for logs, and should be run +// on a separate goroutine from the caller. 
Note that this function will block until +// the given container is removed, not just exited. When finished, this function +// will close the given channel. Alternatively, the function can be stopped by +// signaling on the Done channel. +// +// See https://goo.gl/Dk3Xio for more details. +func (c *Client) Stats(opts StatsOptions) (retErr error) { + errC := make(chan error, 1) + readCloser, writeCloser := io.Pipe() + + defer func() { + close(opts.Stats) + + if err := <-errC; err != nil && retErr == nil { + retErr = err + } + + if err := readCloser.Close(); err != nil && retErr == nil { + retErr = err + } + }() + + reqSent := make(chan struct{}) + go func() { + defer close(errC) + err := c.stream(http.MethodGet, fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ + rawJSONStream: true, + useJSONDecoder: true, + stdout: writeCloser, + timeout: opts.Timeout, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + reqSent: reqSent, + }) + if err != nil { + var dockerError *Error + if errors.As(err, &dockerError) { + if dockerError.Status == http.StatusNotFound { + err = &NoSuchContainer{ID: opts.ID} + } + } + } + if closeErr := writeCloser.Close(); closeErr != nil && err == nil { + err = closeErr + } + errC <- err + }() + + quit := make(chan struct{}) + defer close(quit) + go func() { + // block here waiting for the signal to stop function + select { + case <-opts.Done: + readCloser.Close() + case <-quit: + return + } + }() + + decoder := json.NewDecoder(readCloser) + stats := new(Stats) + <-reqSent + for err := decoder.Decode(stats); !errors.Is(err, io.EOF); err = decoder.Decode(stats) { + if err != nil { + return err + } + opts.Stats <- stats + stats = new(Stats) + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_stop.go b/vendor/github.com/fsouza/go-dockerclient/container_stop.go new file mode 100644 index 00000000000..43d98987413 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_stop.go @@ -0,0 +1,42 @@ +package docker + +import ( + "context" + "errors" + "fmt" + "net/http" +) + +// StopContainer stops a container, killing it after the given timeout (in +// seconds). +// +// See https://goo.gl/R9dZcV for more details. +func (c *Client) StopContainer(id string, timeout uint) error { + return c.stopContainer(id, timeout, doOptions{}) +} + +// StopContainerWithContext stops a container, killing it after the given +// timeout (in seconds). The context can be used to cancel the stop +// container request. +// +// See https://goo.gl/R9dZcV for more details.
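+//
+// For example, stopping with a 10-second in-daemon timeout and a 30-second
+// client-side deadline (assumes a configured *Client named client):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := client.StopContainerWithContext("some-container-id", 10, ctx)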
+func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error { + return c.stopContainer(id, timeout, doOptions{context: ctx}) +} + +func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error { + path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) + resp, err := c.do(http.MethodPost, path, opts) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return &ContainerNotRunning{ID: id} + } + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_top.go b/vendor/github.com/fsouza/go-dockerclient/container_top.go new file mode 100644 index 00000000000..0aec655fb4d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_top.go @@ -0,0 +1,40 @@ +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" +) + +// TopResult represents the list of processes running in a container, as +// returned by /containers/<id>/top. +// +// See https://goo.gl/FLwpPl for more details. +type TopResult struct { + Titles []string + Processes [][]string +} + +// TopContainer returns processes running inside a container +// +// See https://goo.gl/FLwpPl for more details. +func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { + var args string + var result TopResult + if psArgs != "" { + args = fmt.Sprintf("?ps_args=%s", psArgs) + } + path := fmt.Sprintf("/containers/%s/top%s", id, args) + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return result, &NoSuchContainer{ID: id} + } + return result, err + } + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(&result) + return result, err +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_unpause.go b/vendor/github.com/fsouza/go-dockerclient/container_unpause.go new file mode 100644 index 00000000000..8f3adc34b8f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_unpause.go @@ -0,0 +1,24 @@ +package docker + +import ( + "errors" + "fmt" + "net/http" +) + +// UnpauseContainer unpauses the given container. +// +// See https://goo.gl/sZ2faO for more details. +func (c *Client) UnpauseContainer(id string) error { + path := fmt.Sprintf("/containers/%s/unpause", id) + resp, err := c.do(http.MethodPost, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id} + } + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_update.go b/vendor/github.com/fsouza/go-dockerclient/container_update.go new file mode 100644 index 00000000000..e8de21365b9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_update.go @@ -0,0 +1,43 @@ +package docker + +import ( + "context" + "fmt" + "net/http" +) + +// UpdateContainerOptions specify parameters to the UpdateContainer function. +// +// See https://goo.gl/Y6fXUy for more details.
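+//
+// A minimal sketch (assumes a configured *Client named client; the values are
+// illustrative, and note that fields left at their zero value are still sent,
+// since most fields here lack omitempty):
+//
+//    err := client.UpdateContainer("some-container-id", UpdateContainerOptions{
+//        Memory:    512 * 1024 * 1024,
+//        CPUShares: 512,
+//    })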
+type UpdateContainerOptions struct { + BlkioWeight int `json:"BlkioWeight"` + CPUShares int `json:"CpuShares"` + CPUPeriod int `json:"CpuPeriod"` + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` + CPUQuota int `json:"CpuQuota"` + CpusetCpus string `json:"CpusetCpus"` + CpusetMems string `json:"CpusetMems"` + Memory int `json:"Memory"` + MemorySwap int `json:"MemorySwap"` + MemoryReservation int `json:"MemoryReservation"` + KernelMemory int `json:"KernelMemory"` + RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"` + Context context.Context +} + +// UpdateContainer updates the container at ID with the given options +// +// See https://goo.gl/Y6fXUy for more details. +func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error { + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{ + data: opts, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/container_wait.go b/vendor/github.com/fsouza/go-dockerclient/container_wait.go new file mode 100644 index 00000000000..96f0c25f430 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/container_wait.go @@ -0,0 +1,42 @@ +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" +) + +// WaitContainer blocks until the given container stops, returning the exit code +// of the container status. +// +// See https://goo.gl/4AGweZ for more details. +func (c *Client) WaitContainer(id string) (int, error) { + return c.waitContainer(id, doOptions{}) +} + +// WaitContainerWithContext blocks until the given container stops, returning the exit code +// of the container status. The context object can be used to cancel the +// wait request. +// +// See https://goo.gl/4AGweZ for more details. +func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) { + return c.waitContainer(id, doOptions{context: ctx}) +} + +func (c *Client) waitContainer(id string, opts doOptions) (int, error) { + resp, err := c.do(http.MethodPost, "/containers/"+id+"/wait", opts) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return 0, &NoSuchContainer{ID: id} + } + return 0, err + } + defer resp.Body.Close() + var r struct{ StatusCode int } + if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { + return 0, err + } + return r.StatusCode, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution.go b/vendor/github.com/fsouza/go-dockerclient/distribution.go new file mode 100644 index 00000000000..6e5e12f7dd3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/distribution.go @@ -0,0 +1,27 @@ +// Copyright 2017 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/docker/api/types/registry"
+)
+
+// InspectDistribution returns image digest and platform information by contacting the registry.
+func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) {
+	path := "/distribution/" + name + "/json"
+	resp, err := c.do(http.MethodGet, path, doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var distributionInspect registry.DistributionInspect
+	if err := json.NewDecoder(resp.Body).Decode(&distributionInspect); err != nil {
+		return nil, err
+	}
+	return &distributionInspect, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go
new file mode 100644
index 00000000000..0f2e72f1182
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/env.go
@@ -0,0 +1,172 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Env represents a list of key-value pairs in the form KEY=VALUE.
+type Env []string
+
+// Get returns the string value of the given key.
+func (env *Env) Get(key string) (value string) {
+	return env.Map()[key]
+}
+
+// Exists checks whether the given key is defined in the internal Env
+// representation.
+func (env *Env) Exists(key string) bool {
+	_, exists := env.Map()[key]
+	return exists
+}
+
+// GetBool returns a boolean representation of the given key. The key is false
+// whenever its value is 0, no, false, none or an empty string. Any other value
+// will be interpreted as true.
+func (env *Env) GetBool(key string) (value bool) {
+	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+		return false
+	}
+	return true
+}
+
+// SetBool defines a boolean value to the given key.
+func (env *Env) SetBool(key string, value bool) {
+	if value {
+		env.Set(key, "1")
+	} else {
+		env.Set(key, "0")
+	}
+}
+
+// GetInt returns the value of the provided key, converted to int.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt(key string) int {
+	return int(env.GetInt64(key))
+}
+
+// SetInt defines an integer value to the given key.
+func (env *Env) SetInt(key string, value int) {
+	env.Set(key, strconv.Itoa(value))
+}
+
+// GetInt64 returns the value of the provided key, converted to int64.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt64(key string) int64 {
+	s := strings.Trim(env.Get(key), " \t")
+	val, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return -1
+	}
+	return val
+}
+
+// SetInt64 defines an integer (64-bit wide) value to the given key.
+func (env *Env) SetInt64(key string, value int64) {
+	env.Set(key, strconv.FormatInt(value, 10))
+}
+
+// GetJSON unmarshals the value of the provided key in the provided iface.
+//
+// iface is a value that can be provided to the json.Unmarshal function.
+func (env *Env) GetJSON(key string, iface interface{}) error {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	return json.Unmarshal([]byte(sval), iface)
+}
+
+// SetJSON marshals the given value to JSON format and stores it using the
+// provided key.
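+// A short sketch of the Env accessors defined above (keys and values are
+// illustrative):
+//
+//	var env docker.Env
+//	env.Set("PATH", "/usr/bin")
+//	env.SetInt("RETRIES", 3)
+//	debug := env.GetBool("DEBUG") // false: the key is unset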
+func (env *Env) SetJSON(key string, value interface{}) error {
+	sval, err := json.Marshal(value)
+	if err != nil {
+		return err
+	}
+	env.Set(key, string(sval))
+	return nil
+}
+
+// GetList returns a list of strings matching the provided key. It handles the
+// list as a JSON representation of a list of strings.
+//
+// If the given key matches a single string, it will return a list
+// containing only the value that matches the key.
+func (env *Env) GetList(key string) []string {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	var l []string
+	if err := json.Unmarshal([]byte(sval), &l); err != nil {
+		l = append(l, sval)
+	}
+	return l
+}
+
+// SetList stores the given list in the provided key, after serializing it to
+// JSON format.
+func (env *Env) SetList(key string, value []string) error {
+	return env.SetJSON(key, value)
+}
+
+// Set defines the value of a key to the given string.
+func (env *Env) Set(key, value string) {
+	*env = append(*env, key+"="+value)
+}
+
+// Decode decodes `src` as a json dictionary, and adds each decoded key-value
+// pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error is returned.
+func (env *Env) Decode(src io.Reader) error {
+	m := make(map[string]interface{})
+	if err := json.NewDecoder(src).Decode(&m); err != nil {
+		return err
+	}
+	for k, v := range m {
+		env.SetAuto(k, v)
+	}
+	return nil
+}
+
+// SetAuto will try to determine the right Set* method to call based on the
+// given value.
+func (env *Env) SetAuto(key string, value interface{}) {
+	if fval, ok := value.(float64); ok {
+		env.SetInt64(key, int64(fval))
+	} else if sval, ok := value.(string); ok {
+		env.Set(key, sval)
+	} else if val, err := json.Marshal(value); err == nil {
+		env.Set(key, string(val))
+	} else {
+		env.Set(key, fmt.Sprintf("%v", value))
+	}
+}
+
+// Map returns the map representation of the env.
+func (env *Env) Map() map[string]string {
+	if env == nil || len(*env) == 0 {
+		return nil
+	}
+	m := make(map[string]string)
+	for _, kv := range *env {
+		parts := strings.SplitN(kv, "=", 2)
+		if len(parts) == 1 {
+			m[parts[0]] = ""
+		} else {
+			m[parts[0]] = parts[1]
+		}
+	}
+	return m
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
new file mode 100644
index 00000000000..024b4ecc21f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -0,0 +1,451 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// EventsOptions to filter events
+// See https://docs.docker.com/engine/api/v1.41/#operation/SystemEvents for more details.
+type EventsOptions struct {
+	// Show events created since this timestamp then stream new events.
+	Since string
+
+	// Show events created until this timestamp then stop streaming.
+	Until string
+
+	// Filter for events.
For example:
+	//  map[string][]string{"type": {"container"}, "event": {"start", "die"}}
+	// will return events when a container was started and stopped or killed
+	//
+	// Available filters:
+	//  config=<string> config name or ID
+	//  container=<string> container name or ID
+	//  daemon=<string> daemon name or ID
+	//  event=<string> event type
+	//  image=<string> image name or ID
+	//  label=<string> image or container label
+	//  network=<string> network name or ID
+	//  node=<string> node ID
+	//  plugin=<string> plugin name or ID
+	//  scope=<string> local or swarm
+	//  secret=<string> secret name or ID
+	//  service=<string> service name or ID
+	//  type=<string> container, image, volume, network, daemon, plugin, node, service, secret or config
+	//  volume=<string> volume name
+	Filters map[string][]string
+}
+
+// APIEvents represents events coming from the Docker API.
+// The fields in the Docker API changed in API version 1.22, and
+// events for more than images and containers are now fired off.
+// To maintain forward and backward compatibility, go-dockerclient
+// replicates the event in both the new and old format as faithfully as possible.
+//
+// For events that only exist in 1.22 and later, `Status` is filled in as
+// `"Type:Action"` instead of just `Action` to allow older clients to
+// differentiate and not break if they rely on the pre-1.22 Status types.
+//
+// The transformEvent method can be consulted for more information about how
+// events are translated from new/old API formats.
+type APIEvents struct {
+	// New API Fields in 1.22
+	Action string   `json:"action,omitempty"`
+	Type   string   `json:"type,omitempty"`
+	Actor  APIActor `json:"actor,omitempty"`
+
+	// Old API fields for < 1.22
+	Status string `json:"status,omitempty"`
+	ID     string `json:"id,omitempty"`
+	From   string `json:"from,omitempty"`
+
+	// Fields in both
+	Time     int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
+
+// APIActor represents an actor that accomplishes something for an event
+type APIActor struct {
+	ID         string            `json:"id,omitempty"`
+	Attributes map[string]string `json:"attributes,omitempty"`
+}
+
+type eventMonitoringState struct {
+	// `sync/atomic` expects the first word in an allocated struct to be 64-bit
+	// aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
+	lastSeen int64
+	sync.RWMutex
+	sync.WaitGroup
+	enabled   bool
+	C         chan *APIEvents
+	errC      chan error
+	listeners []chan<- *APIEvents
+}
+
+const (
+	maxMonitorConnRetries = 5
+	retryInitialWaitTime  = 10.
+)
+
+var (
+	// ErrNoListeners is the error returned when no listeners are available
+	// to receive an event.
+	ErrNoListeners = errors.New("no listeners present to receive event")
+
+	// ErrListenerAlreadyExists is the error returned when the listener already
+	// exists.
+	ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+
+	// ErrTLSNotSupported is the error returned when the client does not support
+	// TLS (this applies to the Windows named pipe client).
+	ErrTLSNotSupported = errors.New("tls not supported by this client")
+
+	// EOFEvent is sent when the event listener receives an EOF error.
+	EOFEvent = &APIEvents{
+		Type:   "EOF",
+		Status: "EOF",
+	}
+)
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+	return c.AddEventListenerWithOptions(EventsOptions{}, listener)
+}
+
+// AddEventListenerWithOptions adds a new listener to container events in the Docker API.
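+//
+// A minimal subscription sketch (the channel buffer size and filter values
+// are illustrative):
+//
+//	events := make(chan *docker.APIEvents, 16)
+//	err := client.AddEventListenerWithOptions(docker.EventsOptions{
+//		Filters: map[string][]string{"type": {"container"}},
+//	}, events)
+//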
+// See https://docs.docker.com/engine/api/v1.41/#operation/SystemEvents for more details.
+//
+// The listener parameter is a channel through which events will be sent.
+func (c *Client) AddEventListenerWithOptions(options EventsOptions, listener chan<- *APIEvents) error {
+	var err error
+	if !c.eventMonitor.isEnabled() {
+		err = c.eventMonitor.enableEventMonitoring(c, options)
+		if err != nil {
+			return err
+		}
+	}
+	return c.eventMonitor.addListener(listener)
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+	err := c.eventMonitor.removeListener(listener)
+	if err != nil {
+		return err
+	}
+	if c.eventMonitor.listenersCount() == 0 {
+		c.eventMonitor.disableEventMonitoring()
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+	eventState.Lock()
+	defer eventState.Unlock()
+	if listenerExists(listener, &eventState.listeners) {
+		return ErrListenerAlreadyExists
+	}
+	eventState.Add(1)
+	eventState.listeners = append(eventState.listeners, listener)
+	return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+	eventState.Lock()
+	defer eventState.Unlock()
+	if listenerExists(listener, &eventState.listeners) {
+		var newListeners []chan<- *APIEvents
+		for _, l := range eventState.listeners {
+			if l != listener {
+				newListeners = append(newListeners, l)
+			}
+		}
+		eventState.listeners = newListeners
+		eventState.Add(-1)
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) closeListeners() {
+	for _, l := range eventState.listeners {
+		close(l)
+		eventState.Add(-1)
+	}
+	eventState.listeners = nil
+}
+
+func (eventState *eventMonitoringState) listenersCount() int {
+	eventState.RLock()
+	defer eventState.RUnlock()
+	return len(eventState.listeners)
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+	for _, b := range *list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client, opts EventsOptions) error {
+	eventState.Lock()
+	defer eventState.Unlock()
+	if !eventState.enabled {
+		eventState.enabled = true
+		atomic.StoreInt64(&eventState.lastSeen, 0)
+		eventState.C = make(chan *APIEvents, 100)
+		eventState.errC = make(chan error, 1)
+		go eventState.monitorEvents(c, opts)
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() {
+	eventState.Lock()
+	defer eventState.Unlock()
+
+	eventState.closeListeners()
+
+	eventState.Wait()
+
+	if eventState.enabled {
+		eventState.enabled = false
+		close(eventState.C)
+		close(eventState.errC)
+	}
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client, opts EventsOptions) {
+	const (
+		noListenersTimeout  = 5 * time.Second
+		noListenersInterval = 10 * time.Millisecond
+		noListenersMaxTries = noListenersTimeout / noListenersInterval
+	)
+
+	var err error
+	for i := time.Duration(0); i < noListenersMaxTries && eventState.noListeners(); i++ {
+		time.Sleep(noListenersInterval)
+	}
+
+	if eventState.noListeners() {
+		// terminate if no listener is available after 5 seconds.
+		// Prevents goroutine leak when RemoveEventListener is called
+		// right after AddEventListener.
+ eventState.disableEventMonitoring() + return + } + + if err = eventState.connectWithRetry(c, opts); err != nil { + // terminate if connect failed + eventState.disableEventMonitoring() + return + } + for eventState.isEnabled() { + timeout := time.After(100 * time.Millisecond) + select { + case ev, ok := <-eventState.C: + if !ok { + return + } + if ev == EOFEvent { + eventState.disableEventMonitoring() + return + } + eventState.updateLastSeen(ev) + eventState.sendEvent(ev) + case err = <-eventState.errC: + if errors.Is(err, ErrNoListeners) { + eventState.disableEventMonitoring() + return + } else if err != nil { + defer func() { go eventState.monitorEvents(c, opts) }() + return + } + case <-timeout: + continue + } + } +} + +func (eventState *eventMonitoringState) connectWithRetry(c *Client, opts EventsOptions) error { + var retries int + eventState.RLock() + eventChan := eventState.C + errChan := eventState.errC + eventState.RUnlock() + err := c.eventHijack(opts, atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) + for ; err != nil && retries < maxMonitorConnRetries; retries++ { + waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + eventState.RLock() + eventChan = eventState.C + errChan = eventState.errC + eventState.RUnlock() + err = c.eventHijack(opts, atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) + } + return err +} + +func (eventState *eventMonitoringState) noListeners() bool { + eventState.RLock() + defer eventState.RUnlock() + return len(eventState.listeners) == 0 +} + +func (eventState *eventMonitoringState) isEnabled() bool { + eventState.RLock() + defer eventState.RUnlock() + return eventState.enabled +} + +func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { + eventState.RLock() + defer eventState.RUnlock() + eventState.Add(1) + defer eventState.Done() + if eventState.enabled { + if len(eventState.listeners) == 0 { + eventState.errC <- ErrNoListeners + return + } + + for _, listener := range eventState.listeners { + select { + case listener <- event: + default: + } + } + } +} + +func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { + eventState.Lock() + defer eventState.Unlock() + if atomic.LoadInt64(&eventState.lastSeen) < e.Time { + atomic.StoreInt64(&eventState.lastSeen, e.Time) + } +} + +func (c *Client) eventHijack(opts EventsOptions, startTime int64, eventChan chan *APIEvents, errChan chan error) error { + // on reconnect override initial Since with last event seen time + if startTime != 0 { + opts.Since = strconv.FormatInt(startTime, 10) + } + uri := "/events?" + queryString(opts) + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" && protocol != "npipe" { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + var err error + if c.TLSConfig == nil { + dial, err = c.Dialer.Dial(protocol, address) + } else { + netDialer, ok := c.Dialer.(*net.Dialer) + if !ok { + return ErrTLSNotSupported + } + dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig) + } + if err != nil { + return err + } + //lint:ignore SA1019 the alternative doesn't quite work, so keep using the deprecated thing. + conn := httputil.NewClientConn(dial, nil) + req, err := http.NewRequest(http.MethodGet, uri, nil) + if err != nil { + return err + } + res, err := conn.Do(req) + if err != nil { + return err + } + //lint:ignore SA1019 the alternative doesn't quite work, so keep using the deprecated thing. 
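+	// The goroutine below decodes the streamed JSON events until EOF,
+	// normalizes each one via transformEvent, and forwards it to eventChan;
+	// on EOF it emits EOFEvent so the monitor can shut down cleanly.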
+	go func(res *http.Response, conn *httputil.ClientConn) {
+		defer conn.Close()
+		defer res.Body.Close()
+		decoder := json.NewDecoder(res.Body)
+		for {
+			var event APIEvents
+			if err = decoder.Decode(&event); err != nil {
+				if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+					c.eventMonitor.RLock()
+					if c.eventMonitor.enabled && c.eventMonitor.C == eventChan {
+						// Signal that we're exiting.
+						eventChan <- EOFEvent
+					}
+					c.eventMonitor.RUnlock()
+					break
+				}
+				errChan <- err
+			}
+			if event.Time == 0 {
+				continue
+			}
+			transformEvent(&event)
+			c.eventMonitor.RLock()
+			if c.eventMonitor.enabled && c.eventMonitor.C == eventChan {
+				eventChan <- &event
+			}
+			c.eventMonitor.RUnlock()
+		}
+	}(res, conn)
+	return nil
+}
+
+// transformEvent takes an event, determines which API version it came from,
+// and populates both versions of the event.
+func transformEvent(event *APIEvents) {
+	// if event version is <= 1.21 there will be no Action and no Type
+	if event.Action == "" && event.Type == "" {
+		event.Action = event.Status
+		event.Actor.ID = event.ID
+		event.Actor.Attributes = map[string]string{}
+		switch event.Status {
+		case "delete", "import", "pull", "push", "tag", "untag":
+			event.Type = "image"
+		default:
+			event.Type = "container"
+			if event.From != "" {
+				event.Actor.Attributes["image"] = event.From
+			}
+		}
+	} else {
+		if event.Status == "" {
+			if event.Type == "image" || event.Type == "container" {
+				event.Status = event.Action
+			} else {
+				// Because the Status field alone has been overloaded with different
+				// Types, events that are not for an image or a container get the
+				// type prepended, to avoid breaking callers that rely on actions
+				// being only for images and containers
+				event.Status = event.Type + ":" + event.Action
+			}
+		}
+		if event.ID == "" {
+			event.ID = event.Actor.ID
+		}
+		if event.From == "" {
+			event.From = event.Actor.Attributes["image"]
+		}
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go
new file mode 100644
index 00000000000..c8399b0b0c0
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/exec.go
@@ -0,0 +1,224 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// Exec is the type representing a `docker exec` instance and containing the
+// instance ID.
+type Exec struct {
+	ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
+}
+
+// CreateExecOptions specify parameters to the CreateExec function.
+//
+// See https://goo.gl/60TeBP for more details
+type CreateExecOptions struct {
+	Env          []string        `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"`
+	Cmd          []string        `json:"Cmd,omitempty" yaml:"Cmd,omitempty" toml:"Cmd,omitempty"`
+	Container    string          `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"`
+	User         string          `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"`
+	WorkingDir   string          `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"`
+	DetachKeys   string          `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
+	Context      context.Context `json:"-"`
+	AttachStdin  bool            `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
+	AttachStdout bool            `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
+	AttachStderr bool            `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"`
+	Tty          bool            `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
+	Privileged   bool            `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
+}
+
+// CreateExec sets up an exec instance in the running container given by
+// opts.Container, returning the exec instance, or an error in case of failure.
+//
+// See https://goo.gl/60TeBP for more details
+func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
+	if c.serverAPIVersion == nil {
+		c.checkAPIVersion()
+	}
+	if len(opts.Env) > 0 && c.serverAPIVersion.LessThan(apiVersion125) {
+		return nil, errors.New("exec configuration Env is only supported in API#1.25 and above")
+	}
+	if len(opts.WorkingDir) > 0 && c.serverAPIVersion.LessThan(apiVersion135) {
+		return nil, errors.New("exec configuration WorkingDir is only supported in API#1.35 and above")
+	}
+	path := fmt.Sprintf("/containers/%s/exec", opts.Container)
+	resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context})
+	if err != nil {
+		var e *Error
+		if errors.As(err, &e) && e.Status == http.StatusNotFound {
+			return nil, &NoSuchContainer{ID: opts.Container}
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var exec Exec
+	if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+		return nil, err
+	}
+
+	return &exec, nil
+}
+
+// StartExecOptions specify parameters to the StartExec function.
+//
+// See https://goo.gl/1EeDWi for more details
+type StartExecOptions struct {
+	InputStream  io.Reader `qs:"-"`
+	OutputStream io.Writer `qs:"-"`
+	ErrorStream  io.Writer `qs:"-"`
+
+	Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty" toml:"Detach,omitempty"`
+	Tty    bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
+
+	// Use raw terminal? Usually true when the container contains a TTY.
+	RawTerminal bool `qs:"-"`
+
+	// If set, after a successful connect, a sentinel will be sent and then the
+	// client will block on receive before continuing.
+	//
+	// It must be an unbuffered channel. Using a buffered channel can lead
+	// to unexpected behavior.
+	Success chan struct{} `json:"-"`
+
+	Context context.Context `json:"-"`
+}
+
+// StartExec starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
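+//
+// A minimal create-and-start sketch (the container ID and command are
+// illustrative; output is captured in a bytes.Buffer):
+//
+//	exec, err := client.CreateExec(docker.CreateExecOptions{
+//		Container:    "abc123",
+//		Cmd:          []string{"ls", "/"},
+//		AttachStdout: true,
+//	})
+//	if err == nil {
+//		var buf bytes.Buffer
+//		err = client.StartExec(exec.ID, docker.StartExecOptions{OutputStream: &buf})
+//	}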
+//
+// See https://goo.gl/1EeDWi for more details
+func (c *Client) StartExec(id string, opts StartExecOptions) error {
+	cw, err := c.StartExecNonBlocking(id, opts)
+	if err != nil {
+		return err
+	}
+	if cw != nil {
+		return cw.Wait()
+	}
+	return nil
+}
+
+// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/1EeDWi for more details
+func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
+	if id == "" {
+		return nil, &NoSuchExec{ID: id}
+	}
+
+	path := fmt.Sprintf("/exec/%s/start", id)
+
+	if opts.Detach {
+		resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context})
+		if err != nil {
+			var e *Error
+			if errors.As(err, &e) && e.Status == http.StatusNotFound {
+				return nil, &NoSuchExec{ID: id}
+			}
+			return nil, err
+		}
+		defer resp.Body.Close()
+		return nil, nil
+	}
+
+	return c.hijack(http.MethodPost, path, hijackOptions{
+		success:        opts.Success,
+		setRawTerminal: opts.RawTerminal,
+		in:             opts.InputStream,
+		stdout:         opts.OutputStream,
+		stderr:         opts.ErrorStream,
+		data:           opts,
+	})
+}
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See https://goo.gl/Mo5bxx for more details
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+	params := make(url.Values)
+	params.Set("h", strconv.Itoa(height))
+	params.Set("w", strconv.Itoa(width))
+
+	path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+	resp, err := c.do(http.MethodPost, path, doOptions{})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ExecProcessConfig is a type describing the command associated with an Exec
+// instance. It's used in the ExecInspect type.
+type ExecProcessConfig struct {
+	User       string   `json:"user,omitempty" yaml:"user,omitempty" toml:"user,omitempty"`
+	Privileged bool     `json:"privileged,omitempty" yaml:"privileged,omitempty" toml:"privileged,omitempty"`
+	Tty        bool     `json:"tty,omitempty" yaml:"tty,omitempty" toml:"tty,omitempty"`
+	EntryPoint string   `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty" toml:"entrypoint,omitempty"`
+	Arguments  []string `json:"arguments,omitempty" yaml:"arguments,omitempty" toml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running.
It's returned by an API call to /exec/(id)/json
+//
+// See https://goo.gl/ctMUiW for more details
+type ExecInspect struct {
+	ID            string            `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
+	ExitCode      int               `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+	ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty" toml:"ProcessConfig,omitempty"`
+	ContainerID   string            `json:"ContainerID,omitempty" yaml:"ContainerID,omitempty" toml:"ContainerID,omitempty"`
+	DetachKeys    string            `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
+	Running       bool              `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"`
+	OpenStdin     bool              `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"`
+	OpenStderr    bool              `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty" toml:"OpenStderr,omitempty"`
+	OpenStdout    bool              `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty" toml:"OpenStdout,omitempty"`
+	CanRemove     bool              `json:"CanRemove,omitempty" yaml:"CanRemove,omitempty" toml:"CanRemove,omitempty"`
+}
+
+// InspectExec returns low-level information about the exec command id.
+//
+// See https://goo.gl/ctMUiW for more details
+func (c *Client) InspectExec(id string) (*ExecInspect, error) {
+	path := fmt.Sprintf("/exec/%s/json", id)
+	resp, err := c.do(http.MethodGet, path, doOptions{})
+	if err != nil {
+		var e *Error
+		if errors.As(err, &e) && e.Status == http.StatusNotFound {
+			return nil, &NoSuchExec{ID: id}
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var exec ExecInspect
+	if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+		return nil, err
+	}
+	return &exec, nil
+}
+
+// NoSuchExec is the error returned when a given exec instance does not exist.
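+//
+// Callers can detect it with errors.As; a short sketch:
+//
+//	var noSuch *docker.NoSuchExec
+//	if errors.As(err, &noSuch) {
+//		// the exec instance was not found
+//	}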
+type NoSuchExec struct { + ID string +} + +func (err *NoSuchExec) Error() string { + return "No such exec instance: " + err.ID +} diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod new file mode 100644 index 00000000000..4854282534c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/go.mod @@ -0,0 +1,18 @@ +module github.com/fsouza/go-dockerclient + +go 1.16 + +require ( + github.com/Microsoft/go-winio v0.5.1 + github.com/containerd/containerd v1.5.9 // indirect + github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 + github.com/google/go-cmp v0.5.7 + github.com/gorilla/mux v1.8.0 + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/runc v1.0.3 // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b +) diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum new file mode 100644 index 00000000000..869f6d67c35 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/go.sum @@ -0,0 +1,958 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go 
v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.23 
h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod 
h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= 
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= 
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible h1:DPMrerxYRbdZnOnlPPwt9QGf207ETn7FebEmxUQI3bE=
+github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
+github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
+github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod
h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go new file mode 100644 index 00000000000..85d7d6a7e4c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/image.go @@ -0,0 +1,761 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" +) + +// APIImages represent an image returned in the ListImages call. 
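+// A minimal listing sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"):
+//
+//	images, err := client.ListImages(ListImagesOptions{All: false})
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, img := range images {
+//		fmt.Println(img.ID, img.RepoTags)
+//	}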
+type APIImages struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"` + ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty" toml:"ParentId,omitempty"` + RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` +} + +// RootFS represents the underlying layers used by an image +type RootFS struct { + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` + Layers []string `json:"Layers,omitempty" yaml:"Layers,omitempty" toml:"Layers,omitempty"` +} + +// Image is the type representing a docker image and its various properties +type Image struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"` + Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty" toml:"Parent,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` + Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` + Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` + ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty" toml:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty" toml:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty" yaml:"Author,omitempty" toml:"Author,omitempty"` + Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"` + RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"` + RootFS *RootFS `json:"RootFS,omitempty" yaml:"RootFS,omitempty" toml:"RootFS,omitempty"` + OS string `json:"Os,omitempty" yaml:"Os,omitempty" toml:"Os,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +var ( + // ErrNoSuchImage is the error returned when the image does not exist. + ErrNoSuchImage = errors.New("no such image") + + // ErrMissingRepo is the error returned when the remote repository is + // missing. 
+ ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") + + // ErrMissingOutputStream is the error returned when no output stream + // is provided to some calls, like BuildImage. + ErrMissingOutputStream = errors.New("missing output stream") + + // ErrMultipleContexts is the error returned when both a ContextDir and + // InputStream are provided in BuildImageOptions + ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") + + // ErrMustSpecifyNames is the error returned when the Names field on + // ExportImagesOptions is nil or empty + ErrMustSpecifyNames = errors.New("must specify at least one name to export") +) + +// ListImagesOptions specify parameters to the ListImages function. +// +// See https://goo.gl/BVzauZ for more details. +type ListImagesOptions struct { + Filters map[string][]string + All bool + Digests bool + Filter string + Context context.Context +} + +// ListImages returns the list of available images in the server. +// +// See https://goo.gl/BVzauZ for more details. +func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { + path := "/images/json?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var images []APIImages + if err := json.NewDecoder(resp.Body).Decode(&images); err != nil { + return nil, err + } + return images, nil +} + +// ImageHistory represent a layer in an image's history returned by the +// ImageHistory call. +type ImageHistory struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty" toml:"Tags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` + CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` +} + +// ImageHistory returns the history of the image by its name or ID. +// +// See https://goo.gl/fYtxQa for more details. +func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { + resp, err := c.do(http.MethodGet, "/images/"+name+"/history", doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + return nil, err + } + defer resp.Body.Close() + var history []ImageHistory + if err := json.NewDecoder(resp.Body).Decode(&history); err != nil { + return nil, err + } + return history, nil +} + +// RemoveImage removes an image by its name or ID. +// +// See https://goo.gl/Vd2Pck for more details. +func (c *Client) RemoveImage(name string) error { + resp, err := c.do(http.MethodDelete, "/images/"+name, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return ErrNoSuchImage + } + return err + } + resp.Body.Close() + return nil +} + +// RemoveImageOptions present the set of options available for removing an image +// from a registry. +// +// See https://goo.gl/Vd2Pck for more details. +type RemoveImageOptions struct { + Force bool `qs:"force"` + NoPrune bool `qs:"noprune"` + Context context.Context +} + +// RemoveImageExtended removes an image by its name or ID. +// Extra params can be passed, see RemoveImageOptions +// +// See https://goo.gl/Vd2Pck for more details.
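+// A minimal usage sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"; the image reference is illustrative):
+//
+//	opts := RemoveImageOptions{Force: true, Context: context.Background()}
+//	if err := client.RemoveImageExtended("alpine:latest", opts); err != nil {
+//		// ErrNoSuchImage is returned when the image does not exist
+//	}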
+func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { + uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) + resp, err := c.do(http.MethodDelete, uri, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return ErrNoSuchImage + } + return err + } + resp.Body.Close() + return nil +} + +// InspectImage returns an image by its name or ID. +// +// See https://goo.gl/ncLTG8 for more details. +func (c *Client) InspectImage(name string) (*Image, error) { + resp, err := c.do(http.MethodGet, "/images/"+name+"/json", doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + return nil, err + } + defer resp.Body.Close() + + var image Image + + // if the caller elected to skip checking the server's version, assume it's the latest + if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { + if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { + return nil, err + } + } else { + var imagePre012 ImagePre012 + if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil { + return nil, err + } + + image.ID = imagePre012.ID + image.Parent = imagePre012.Parent + image.Comment = imagePre012.Comment + image.Created = imagePre012.Created + image.Container = imagePre012.Container + image.ContainerConfig = imagePre012.ContainerConfig + image.DockerVersion = imagePre012.DockerVersion + image.Author = imagePre012.Author + image.Config = imagePre012.Config + image.Architecture = imagePre012.Architecture + image.Size = imagePre012.Size + } + + return &image, nil +} + +// PushImageOptions represents options to use in the PushImage method. +// +// See https://goo.gl/BZemGg for more details. +type PushImageOptions struct { + // Name of the image + Name string + + // Tag of the image + Tag string + + // Registry server to push the image + Registry string + + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + + Context context.Context +} + +// PushImage pushes an image to a remote registry, logging progress to w. +// +// An empty instance of AuthConfiguration may be used for unauthenticated +// pushes. +// +// See https://goo.gl/BZemGg for more details. +func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { + if opts.Name == "" { + return ErrNoSuchImage + } + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + name := opts.Name + opts.Name = "" + path := "/images/" + name + "/push?" + queryString(&opts) + return c.stream(http.MethodPost, path, streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} + +// PullImageOptions present the set of options available for pulling an image +// from a registry. +// +// See https://goo.gl/qkoSsn for more details. 
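+// A minimal pull sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"; progress is streamed to stdout):
+//
+//	err := client.PullImage(PullImageOptions{
+//		Repository:   "busybox",
+//		Tag:          "latest",
+//		OutputStream: os.Stdout,
+//	}, AuthConfiguration{})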
+type PullImageOptions struct { + All bool + Repository string `qs:"fromImage"` + Tag string + Platform string `ver:"1.32"` + + // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21 + // and Docker Engine < 1.9 + // This parameter was removed in Docker Engine 1.11 + Registry string + + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// PullImage pulls an image from a remote registry, logging progress to +// opts.OutputStream. +// +// See https://goo.gl/qkoSsn for more details. +func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + if opts.Tag == "" && strings.Contains(opts.Repository, "@") { + parts := strings.SplitN(opts.Repository, "@", 2) + opts.Repository = parts[0] + opts.Tag = parts[1] + } + return c.createImage(&opts, headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) +} + +func (c *Client) createImage(opts interface{}, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { + url, err := c.getPath("/images/create", opts) + if err != nil { + return err + } + return c.streamURL(http.MethodPost, url, streamOptions{ + setRawTerminal: true, + headers: headers, + in: in, + stdout: w, + rawJSONStream: rawJSONStream, + inactivityTimeout: timeout, + context: context, + }) +} + +// LoadImageOptions represents the options for LoadImage Docker API Call +// +// See https://goo.gl/rEsBV3 for more details. +type LoadImageOptions struct { + InputStream io.Reader + OutputStream io.Writer + Context context.Context +} + +// LoadImage imports a tarball docker image +// +// See https://goo.gl/rEsBV3 for more details. +func (c *Client) LoadImage(opts LoadImageOptions) error { + return c.stream(http.MethodPost, "/images/load", streamOptions{ + setRawTerminal: true, + in: opts.InputStream, + stdout: opts.OutputStream, + context: opts.Context, + }) +} + +// ExportImageOptions represent the options for ExportImage Docker API call. +// +// See https://goo.gl/AuySaA for more details. +type ExportImageOptions struct { + Name string + OutputStream io.Writer + InactivityTimeout time.Duration + Context context.Context +} + +// ExportImage exports an image (as a tar file) into the stream. +// +// See https://goo.gl/AuySaA for more details. +func (c *Client) ExportImage(opts ExportImageOptions) error { + return c.stream(http.MethodGet, fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} + +// ExportImagesOptions represent the options for ExportImages Docker API call +// +// See https://goo.gl/N9XlDn for more details. +type ExportImagesOptions struct { + Names []string + OutputStream io.Writer `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// ExportImages exports one or more images (as a tar file) into the stream +// +// See https://goo.gl/N9XlDn for more details. 
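+// A minimal export sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client" and an open *os.File named "f" as the
+// destination for the tar stream):
+//
+//	err := client.ExportImages(ExportImagesOptions{
+//		Names:        []string{"busybox:latest"},
+//		OutputStream: f,
+//	})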
+func (c *Client) ExportImages(opts ExportImagesOptions) error { + if opts.Names == nil || len(opts.Names) == 0 { + return ErrMustSpecifyNames + } + // API < 1.25 allows multiple name values + // 1.25 says name must be a comma separated list + var err error + var exporturl string + if c.requestedAPIVersion.GreaterThanOrEqualTo(apiVersion125) { + str := opts.Names[0] + for _, val := range opts.Names[1:] { + str += "," + val + } + exporturl, err = c.getPath("/images/get", ExportImagesOptions{ + Names: []string{str}, + OutputStream: opts.OutputStream, + InactivityTimeout: opts.InactivityTimeout, + Context: opts.Context, + }) + } else { + exporturl, err = c.getPath("/images/get", &opts) + } + if err != nil { + return err + } + return c.streamURL(http.MethodGet, exporturl, streamOptions{ + setRawTerminal: true, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + }) +} + +// ImportImageOptions present the set of informations available for importing +// an image from a source file or the stdin. +// +// See https://goo.gl/qkoSsn for more details. +type ImportImageOptions struct { + Repository string `qs:"repo"` + Source string `qs:"fromSrc"` + Tag string `qs:"tag"` + + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// ImportImage imports an image from a url, a file or stdin +// +// See https://goo.gl/qkoSsn for more details. +func (c *Client) ImportImage(opts ImportImageOptions) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + if opts.Source != "-" { + opts.InputStream = nil + } + if opts.Source != "-" && !isURL(opts.Source) { + f, err := os.Open(opts.Source) + if err != nil { + return err + } + opts.InputStream = f + opts.Source = "-" + } + return c.createImage(&opts, nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) +} + +// BuildImageOptions present the set of informations available for building an +// image from a tarfile with a Dockerfile in it. +// +// For more details about the Docker building process, see +// https://goo.gl/4nYHwV. +type BuildImageOptions struct { + Context context.Context + Name string `qs:"t"` + Dockerfile string `ver:"1.25"` + ExtraHosts string `ver:"1.28"` + CacheFrom []string `qs:"-" ver:"1.25"` + Memory int64 + Memswap int64 + ShmSize int64 + CPUShares int64 + CPUQuota int64 `ver:"1.21"` + CPUPeriod int64 `ver:"1.21"` + CPUSetCPUs string + Labels map[string]string + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + Remote string + Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header + AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header + ContextDir string `qs:"-"` + Ulimits []ULimit `qs:"-" ver:"1.18"` + BuildArgs []BuildArg `qs:"-" ver:"1.21"` + NetworkMode string `ver:"1.25"` + Platform string `ver:"1.32"` + InactivityTimeout time.Duration `qs:"-"` + CgroupParent string + SecurityOpt []string + Target string + Outputs string `ver:"1.40"` + NoCache bool + SuppressOutput bool `qs:"q"` + Pull bool `ver:"1.16"` + RmTmpContainer bool `qs:"rm"` + ForceRmTmpContainer bool `qs:"forcerm" ver:"1.12"` + RawJSONStream bool `qs:"-"` +} + +// BuildArg represents arguments that can be passed to the image when building +// it from a Dockerfile. +// +// For more details about the Docker building process, see +// https://goo.gl/4nYHwV. 
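+// A minimal build sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"; the tag and context directory are
+// illustrative):
+//
+//	err := client.BuildImage(BuildImageOptions{
+//		Name:         "example/app:dev",
+//		ContextDir:   "./app",
+//		OutputStream: os.Stdout,
+//		BuildArgs:    []BuildArg{{Name: "VERSION", Value: "1.0.0"}},
+//	})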
+type BuildArg struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// BuildImage builds an image from a tarball's url or a Dockerfile in the input +// stream. +// +// See https://goo.gl/4nYHwV for more details. +func (c *Client) BuildImage(opts BuildImageOptions) error { + if opts.OutputStream == nil { + return ErrMissingOutputStream + } + headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs)) + if err != nil { + return err + } + + if opts.Remote != "" && opts.Name == "" { + opts.Name = opts.Remote + } + if opts.InputStream != nil || opts.ContextDir != "" { + headers["Content-Type"] = "application/tar" + } else if opts.Remote == "" { + return ErrMissingRepo + } + if opts.ContextDir != "" { + if opts.InputStream != nil { + return ErrMultipleContexts + } + var err error + if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil { + return err + } + } + qs, ver := queryStringVersion(&opts) + + if len(opts.CacheFrom) > 0 { + if b, err := json.Marshal(opts.CacheFrom); err == nil { + item := url.Values(map[string][]string{}) + item.Add("cachefrom", string(b)) + qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion125.GreaterThan(ver) { + ver = apiVersion125 + } + } + } + + if len(opts.Ulimits) > 0 { + if b, err := json.Marshal(opts.Ulimits); err == nil { + item := url.Values(map[string][]string{}) + item.Add("ulimits", string(b)) + qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion118.GreaterThan(ver) { + ver = apiVersion118 + } + } + } + + if len(opts.BuildArgs) > 0 { + v := make(map[string]string) + for _, arg := range opts.BuildArgs { + v[arg.Name] = arg.Value + } + if b, err := json.Marshal(v); err == nil { + item := url.Values(map[string][]string{}) + item.Add("buildargs", string(b)) + qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion121.GreaterThan(ver) { + ver = apiVersion121 + } + } + } + + buildURL, err := c.pathVersionCheck("/build", qs, ver) + if err != nil { + return err + } + + return c.streamURL(http.MethodPost, buildURL, streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + in: opts.InputStream, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} + +func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) registryAuth { + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { + return AuthConfigurations119(authConfigs.Configs) + } + return authConfigs +} + +// TagImageOptions present the set of options to tag an image. +// +// See https://goo.gl/prHrvo for more details. +type TagImageOptions struct { + Repo string + Tag string + Force bool + Context context.Context +} + +// TagImage adds a tag to the image identified by the given name. +// +// See https://goo.gl/prHrvo for more details. 
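+// A minimal tagging sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"; the target repository is illustrative):
+//
+//	err := client.TagImage("busybox:latest", TagImageOptions{
+//		Repo: "registry.example.com/mirror/busybox",
+//		Tag:  "v1",
+//	})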
+func (c *Client) TagImage(name string, opts TagImageOptions) error { + if name == "" { + return ErrNoSuchImage + } + resp, err := c.do(http.MethodPost, "/images/"+name+"/tag?"+queryString(&opts), doOptions{ + context: opts.Context, + }) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return ErrNoSuchImage + } + + return err +} + +func isURL(u string) bool { + p, err := url.Parse(u) + if err != nil { + return false + } + return p.Scheme == "http" || p.Scheme == "https" +} + +func headersWithAuth(auths ...registryAuth) (map[string]string, error) { + headers := make(map[string]string) + + for _, auth := range auths { + if auth.isEmpty() { + continue + } + data, err := json.Marshal(auth) + if err != nil { + return nil, err + } + headers[auth.headerKey()] = base64.URLEncoding.EncodeToString(data) + } + + return headers, nil +} + +// APIImageSearch reflect the result of a search on the Docker Hub. +// +// See https://goo.gl/KLO9IZ for more details. +type APIImageSearch struct { + Description string `json:"description,omitempty" yaml:"description,omitempty" toml:"description,omitempty"` + IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty" toml:"is_official,omitempty"` + IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty" toml:"is_automated,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty" toml:"name,omitempty"` + StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty" toml:"star_count,omitempty"` +} + +// SearchImages search the docker hub with a specific given term. +// +// See https://goo.gl/KLO9IZ for more details. +func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { + resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var searchResult []APIImageSearch + if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { + return nil, err + } + return searchResult, nil +} + +// SearchImagesEx search the docker hub with a specific given term and authentication. +// +// See https://goo.gl/KLO9IZ for more details. +func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) { + headers, err := headersWithAuth(auth) + if err != nil { + return nil, err + } + + resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{ + headers: headers, + }) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + var searchResult []APIImageSearch + if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { + return nil, err + } + + return searchResult, nil +} + +// PruneImagesOptions specify parameters to the PruneImages function. +// +// See https://goo.gl/qfZlbZ for more details. +type PruneImagesOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneImagesResults specify results from the PruneImages function. +// +// See https://goo.gl/qfZlbZ for more details. +type PruneImagesResults struct { + ImagesDeleted []struct{ Untagged, Deleted string } + SpaceReclaimed int64 +} + +// PruneImages deletes images which are unused. +// +// See https://goo.gl/qfZlbZ for more details. +func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) { + path := "/images/prune?" 
+ queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var results PruneImagesResults + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return nil, err + } + return &results, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go new file mode 100644 index 00000000000..8eaa827040e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/misc.go @@ -0,0 +1,198 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "net" + "net/http" + "strings" + + "github.com/docker/docker/api/types/swarm" +) + +// Version returns version information about the docker server. +// +// See https://goo.gl/mU7yje for more details. +func (c *Client) Version() (*Env, error) { + return c.VersionWithContext(context.TODO()) +} + +// VersionWithContext returns version information about the docker server. +func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) { + resp, err := c.do(http.MethodGet, "/version", doOptions{context: ctx}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var env Env + if err := env.Decode(resp.Body); err != nil { + return nil, err + } + return &env, nil +} + +// DockerInfo contains information about the Docker server +// +// See https://goo.gl/bHUoz9 for more details. +type DockerInfo struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + NFd int + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *ServiceConfig + SecurityOptions []string + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ServerVersion string + ClusterStore string + Runtimes map[string]Runtime + ClusterAdvertise string + Isolation string + InitBinary string + DefaultRuntime string + Swarm swarm.Info + LiveRestoreEnabled bool + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + OomKillDisable bool + ExperimentalBuild bool +} + +// Runtime describes an OCI runtime +// +// for more information, see: https://dockr.ly/2NKM8qq +type Runtime struct { + Path string + Args []string `json:"runtimeArgs"` +} + +// PluginsInfo is a struct with the plugins registered with the docker daemon +// +// for more information, see: https://goo.gl/bHUoz9 +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ServiceConfig stores daemon registry services configuration. 
+// +// for more information, see: https://goo.gl/7iFFDz +type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet + IndexConfigs map[string]*IndexInfo + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON. +// +// for more information, see: https://goo.gl/7iFFDz +type NetIPNet net.IPNet + +// MarshalJSON returns the JSON representation of the IPNet. +// +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON. +// +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry. +// +// for more information, see: https://goo.gl/7iFFDz +type IndexInfo struct { + Name string + Mirrors []string + Secure bool + Official bool +} + +// Info returns system-wide information about the Docker server. +// +// See https://goo.gl/ElTHi2 for more details. +func (c *Client) Info() (*DockerInfo, error) { + resp, err := c.do(http.MethodGet, "/info", doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var info DockerInfo + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return nil, err + } + return &info, nil +} + +// ParseRepositoryTag gets the name of the repository and returns it splitted +// in two parts: the repository and the tag. It ignores the digest when it is +// present. +// +// Some examples: +// +// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest +// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" +// busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92 -> busybox, latest +func ParseRepositoryTag(repoTag string) (repository string, tag string) { + parts := strings.SplitN(repoTag, "@", 2) + repoTag = parts[0] + n := strings.LastIndex(repoTag, ":") + if n < 0 { + return repoTag, "" + } + if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { + return repoTag[:n], tag + } + return repoTag, "" +} diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go new file mode 100644 index 00000000000..f3ce4ce96a8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -0,0 +1,340 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" +) + +// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the +// network already exists. +var ErrNetworkAlreadyExists = errors.New("network already exists") + +// Network represents a network. +// +// See https://goo.gl/6GugX3 for more details. +type Network struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + IPAM IPAMOptions + Containers map[string]Endpoint + Options map[string]string + Internal bool + EnableIPv6 bool `json:"EnableIPv6"` + Labels map[string]string +} + +// Endpoint contains network resources allocated and used for a container in a network +// +// See https://goo.gl/6GugX3 for more details. 
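+// A minimal inspection sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"; "bridge" is the default local network):
+//
+//	network, err := client.NetworkInfo("bridge")
+//	if err != nil {
+//		return err // *NoSuchNetwork when the network does not exist
+//	}
+//	for id, ep := range network.Containers {
+//		fmt.Println(id, ep.IPv4Address)
+//	}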
+type Endpoint struct { + Name string + ID string `json:"EndpointID"` + MacAddress string + IPv4Address string + IPv6Address string +} + +// ListNetworks returns all networks. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ListNetworks() ([]Network, error) { + resp, err := c.do(http.MethodGet, "/networks", doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var networks []Network + if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { + return nil, err + } + return networks, nil +} + +// NetworkFilterOpts is an aggregation of key=value that Docker +// uses to filter networks +type NetworkFilterOpts map[string]map[string]bool + +// FilteredListNetworks returns all networks with the filters applied +// +// See goo.gl/zd2mx4 for more details. +func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) { + params, err := json.Marshal(opts) + if err != nil { + return nil, err + } + qs := make(url.Values) + qs.Add("filters", string(params)) + path := "/networks?" + qs.Encode() + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var networks []Network + if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { + return nil, err + } + return networks, nil +} + +// NetworkInfo returns information about a network by its ID. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) NetworkInfo(id string) (*Network, error) { + path := "/networks/" + id + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchNetwork{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var network Network + if err := json.NewDecoder(resp.Body).Decode(&network); err != nil { + return nil, err + } + return &network, nil +} + +// CreateNetworkOptions specify parameters to the CreateNetwork function and +// (for now) is the expected body of the "create network" http request message +// +// See https://goo.gl/6GugX3 for more details. +type CreateNetworkOptions struct { + Name string `json:"Name" yaml:"Name" toml:"Name"` + Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` + Scope string `json:"Scope" yaml:"Scope" toml:"Scope"` + IPAM *IPAMOptions `json:"IPAM,omitempty" yaml:"IPAM" toml:"IPAM"` + ConfigFrom *NetworkConfigFrom `json:"ConfigFrom,omitempty" yaml:"ConfigFrom" toml:"ConfigFrom"` + Options map[string]interface{} `json:"Options" yaml:"Options" toml:"Options"` + Labels map[string]string `json:"Labels" yaml:"Labels" toml:"Labels"` + CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate" toml:"CheckDuplicate"` + Internal bool `json:"Internal" yaml:"Internal" toml:"Internal"` + EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6" toml:"EnableIPv6"` + Attachable bool `json:"Attachable" yaml:"Attachable" toml:"Attachable"` + ConfigOnly bool `json:"ConfigOnly" yaml:"ConfigOnly" toml:"ConfigOnly"` + Ingress bool `json:"Ingress" yaml:"Ingress" toml:"Ingress"` + Context context.Context `json:"-"` +} + +// NetworkConfigFrom is used in network creation for specifying the source of a +// network configuration. +type NetworkConfigFrom struct { + Network string `json:"Network" yaml:"Network" toml:"Network"` +} + +// IPAMOptions controls IP Address Management when creating a network +// +// See https://goo.gl/T8kRVH for more details. 
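+// A minimal creation sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client"; the network name and subnet are
+// illustrative):
+//
+//	network, err := client.CreateNetwork(CreateNetworkOptions{
+//		Name:   "app-net",
+//		Driver: "bridge",
+//		IPAM: &IPAMOptions{
+//			Driver: "default",
+//			Config: []IPAMConfig{{Subnet: "172.28.0.0/16", Gateway: "172.28.0.1"}},
+//		},
+//	})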
+type IPAMOptions struct { + Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` + Config []IPAMConfig `json:"Config" yaml:"Config" toml:"Config"` + Options map[string]string `json:"Options" yaml:"Options" toml:"Options"` +} + +// IPAMConfig represents IPAM configurations +// +// See https://goo.gl/T8kRVH for more details. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// CreateNetwork creates a new network, returning the network instance, +// or an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { + resp, err := c.do( + http.MethodPost, + "/networks/create", + doOptions{ + data: opts, + context: opts.Context, + }, + ) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + type createNetworkResponse struct { + ID string + } + var ( + network Network + cnr createNetworkResponse + ) + if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil { + return nil, err + } + + network.Name = opts.Name + network.ID = cnr.ID + network.Driver = opts.Driver + + return &network, nil +} + +// RemoveNetwork removes a network or returns an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) RemoveNetwork(id string) error { + resp, err := c.do(http.MethodDelete, "/networks/"+id, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchNetwork{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// NetworkConnectionOptions specify parameters to the ConnectNetwork and +// DisconnectNetwork function. +// +// See https://goo.gl/RV7BJU for more details. +type NetworkConnectionOptions struct { + Container string + + // EndpointConfig is only applicable to the ConnectNetwork call + EndpointConfig *EndpointConfig `json:"EndpointConfig,omitempty"` + + // Force is only applicable to the DisconnectNetwork call + Force bool + + Context context.Context `json:"-"` +} + +// EndpointConfig stores network endpoint details +// +// See https://goo.gl/RV7BJU for more details. 
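+// A minimal connection sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client" plus existing networkID and containerID
+// values; the static address is illustrative):
+//
+//	err := client.ConnectNetwork(networkID, NetworkConnectionOptions{
+//		Container: containerID,
+//		EndpointConfig: &EndpointConfig{
+//			IPAMConfig: &EndpointIPAMConfig{IPv4Address: "172.28.0.10"},
+//		},
+//	})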
+type EndpointConfig struct { + IPAMConfig *EndpointIPAMConfig `json:"IPAMConfig,omitempty" yaml:"IPAMConfig,omitempty" toml:"IPAMConfig,omitempty"` + Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"` + Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + DriverOpts map[string]string `json:"DriverOpts,omitempty" yaml:"DriverOpts,omitempty" toml:"DriverOpts,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for an +// endpoint +// +// See https://goo.gl/RV7BJU for more details. +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` +} + +// ConnectNetwork adds a container to a network or returns an error in case of +// failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do(http.MethodPost, "/networks/"+id+"/connect", doOptions{ + data: opts, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + +// DisconnectNetwork removes a container from a network or returns an error in +// case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do(http.MethodPost, "/networks/"+id+"/disconnect", doOptions{data: opts}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + +// PruneNetworksOptions specify parameters to the PruneNetworks function. +// +// See https://goo.gl/kX0S9h for more details. +type PruneNetworksOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneNetworksResults specify results from the PruneNetworks function. +// +// See https://goo.gl/kX0S9h for more details. +type PruneNetworksResults struct { + NetworksDeleted []string +} + +// PruneNetworks deletes networks which are unused. +// +// See https://goo.gl/kX0S9h for more details. +func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) { + path := "/networks/prune?" 
+ queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var results PruneNetworksResults + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return nil, err + } + return &results, nil +} + +// NoSuchNetwork is the error returned when a given network does not exist. +type NoSuchNetwork struct { + ID string +} + +func (err *NoSuchNetwork) Error() string { + return fmt.Sprintf("No such network: %s", err.ID) +} + +// NoSuchNetworkOrContainer is the error returned when a given network or +// container does not exist. +type NoSuchNetworkOrContainer struct { + NetworkID string + ContainerID string +} + +func (err *NoSuchNetworkOrContainer) Error() string { + return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go new file mode 100644 index 00000000000..be45607b90f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -0,0 +1,461 @@ +// Copyright 2018 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/http" +) + +// PluginPrivilege represents a privilege for a plugin. +type PluginPrivilege struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Value []string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// InstallPluginOptions specify parameters to the InstallPlugins function. +// +// See https://goo.gl/C4t7Tz for more details. +type InstallPluginOptions struct { + Remote string + Name string + Plugins []PluginPrivilege `qs:"-"` + + Auth AuthConfiguration + + Context context.Context +} + +// InstallPlugins installs a plugin or returns an error in case of failure. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) InstallPlugins(opts InstallPluginOptions) error { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return err + } + + path := "/plugins/pull?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{ + data: opts.Plugins, + context: opts.Context, + headers: headers, + }) + if err != nil { + return err + } + defer resp.Body.Close() + // PullPlugin streams back the progress of the pull, we must consume the whole body + // otherwise the pull will be canceled on the engine. + if _, err := ioutil.ReadAll(resp.Body); err != nil { + return err + } + return nil +} + +// PluginSettings stores plugin settings. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginSettings struct { + Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"` + Devices []string `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"` +} + +// PluginInterface stores plugin interface. +// +// See https://goo.gl/C4t7Tz for more details. 
+type PluginInterface struct { + Types []string `json:"Types,omitempty" yaml:"Types,omitempty" toml:"Types,omitempty"` + Socket string `json:"Socket,omitempty" yaml:"Socket,omitempty" toml:"Socket,omitempty"` +} + +// PluginNetwork stores plugin network type. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginNetwork struct { + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` +} + +// PluginLinux stores plugin linux setting. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginLinux struct { + Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` + AllowAllDevices bool `json:"AllowAllDevices,omitempty" yaml:"AllowAllDevices,omitempty" toml:"AllowAllDevices,omitempty"` + Devices []PluginLinuxDevices `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"` +} + +// PluginLinuxDevices stores plugin linux device setting. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginLinuxDevices struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Documentation,omitempty" yaml:"Documentation,omitempty" toml:"Documentation,omitempty"` + Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"` + Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"` +} + +// PluginEnv stores plugin environment. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginEnv struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"` + Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// PluginArgs stores plugin arguments. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginArgs struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"` + Value []string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// PluginUser stores plugin user. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginUser struct { + UID int32 `json:"UID,omitempty" yaml:"UID,omitempty" toml:"UID,omitempty"` + GID int32 `json:"GID,omitempty" yaml:"GID,omitempty" toml:"GID,omitempty"` +} + +// PluginConfig stores plugin config. +// +// See https://goo.gl/C4t7Tz for more details. 
+type PluginConfig struct { + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Documentation string + Interface PluginInterface `json:"Interface,omitempty" yaml:"Interface,omitempty" toml:"Interface,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty" toml:"Entrypoint,omitempty"` + WorkDir string `json:"WorkDir,omitempty" yaml:"WorkDir,omitempty" toml:"WorkDir,omitempty"` + User PluginUser `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` + Network PluginNetwork `json:"Network,omitempty" yaml:"Network,omitempty" toml:"Network,omitempty"` + Linux PluginLinux `json:"Linux,omitempty" yaml:"Linux,omitempty" toml:"Linux,omitempty"` + PropagatedMount string `json:"PropagatedMount,omitempty" yaml:"PropagatedMount,omitempty" toml:"PropagatedMount,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + Env []PluginEnv `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Args PluginArgs `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"` +} + +// PluginDetail specify results from the ListPlugins function. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginDetail struct { + ID string `json:"Id,omitempty" yaml:"Id,omitempty" toml:"Id,omitempty"` + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Tag string `json:"Tag,omitempty" yaml:"Tag,omitempty" toml:"Tag,omitempty"` + Active bool `json:"Enabled,omitempty" yaml:"Active,omitempty" toml:"Active,omitempty"` + Settings PluginSettings `json:"Settings,omitempty" yaml:"Settings,omitempty" toml:"Settings,omitempty"` + Config PluginConfig `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` +} + +// ListPlugins returns pluginDetails or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) ListPlugins(ctx context.Context) ([]PluginDetail, error) { + resp, err := c.do(http.MethodGet, "/plugins", doOptions{ + context: ctx, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + pluginDetails := make([]PluginDetail, 0) + if err := json.NewDecoder(resp.Body).Decode(&pluginDetails); err != nil { + return nil, err + } + return pluginDetails, nil +} + +// ListFilteredPluginsOptions specify parameters to the ListFilteredPlugins function. +// +// See https://goo.gl/C4t7Tz for more details. +type ListFilteredPluginsOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListFilteredPlugins returns pluginDetails or an error. +// +// See https://goo.gl/rmdmWg for more details. +func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginDetail, error) { + path := "/plugins/json?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{ + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + pluginDetails := make([]PluginDetail, 0) + if err := json.NewDecoder(resp.Body).Decode(&pluginDetails); err != nil { + return nil, err + } + return pluginDetails, nil +} + +// GetPluginPrivileges returns pluginPrivileges or an error. +// +// See https://goo.gl/C4t7Tz for more details. 
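+// A minimal install sketch (not part of the upstream docs; assumes an
+// initialized *Client named "client" and a context.Context named "ctx";
+// the plugin reference is illustrative):
+//
+//	privs, err := client.GetPluginPrivileges("vieux/sshfs:latest", ctx)
+//	if err != nil {
+//		return err
+//	}
+//	err = client.InstallPlugins(InstallPluginOptions{
+//		Remote:  "vieux/sshfs:latest",
+//		Plugins: privs,
+//		Context: ctx,
+//	})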
+func (c *Client) GetPluginPrivileges(remote string, ctx context.Context) ([]PluginPrivilege, error) { + return c.GetPluginPrivilegesWithOptions( + GetPluginPrivilegesOptions{ + Remote: remote, + Context: ctx, + }) +} + +// GetPluginPrivilegesOptions specify parameters to the GetPluginPrivilegesWithOptions function. +// +// See https://goo.gl/C4t7Tz for more details. +type GetPluginPrivilegesOptions struct { + Remote string + Auth AuthConfiguration + Context context.Context +} + +// GetPluginPrivilegesWithOptions returns pluginPrivileges or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) GetPluginPrivilegesWithOptions(opts GetPluginPrivilegesOptions) ([]PluginPrivilege, error) { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return nil, err + } + + path := "/plugins/privileges?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{ + context: opts.Context, + headers: headers, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var pluginPrivileges []PluginPrivilege + if err := json.NewDecoder(resp.Body).Decode(&pluginPrivileges); err != nil { + return nil, err + } + return pluginPrivileges, nil +} + +// InspectPlugins returns a pluginDetail or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) InspectPlugins(name string, ctx context.Context) (*PluginDetail, error) { + resp, err := c.do(http.MethodGet, "/plugins/"+name+"/json", doOptions{ + context: ctx, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchPlugin{ID: name} + } + return nil, err + } + defer resp.Body.Close() + var pluginDetail PluginDetail + if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { + return nil, err + } + return &pluginDetail, nil +} + +// RemovePluginOptions specify parameters to the RemovePlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type RemovePluginOptions struct { + // The Name of the plugin. + Name string `qs:"-"` + + Force bool `qs:"force"` + Context context.Context +} + +// RemovePlugin returns a PluginDetail or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { + path := "/plugins/" + opts.Name + "?" + queryString(opts) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchPlugin{ID: opts.Name} + } + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if len(body) == 0 { + // Seems like newer docker versions won't return the plugindetail after removal + return nil, nil + } + + var pluginDetail PluginDetail + if err := json.Unmarshal(body, &pluginDetail); err != nil { + return nil, err + } + return &pluginDetail, nil +} + +// EnablePluginOptions specify parameters to the EnablePlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type EnablePluginOptions struct { + // The Name of the plugin. + Name string `qs:"-"` + Timeout int64 `qs:"timeout"` + + Context context.Context +} + +// EnablePlugin enables plugin that opts point or returns an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) EnablePlugin(opts EnablePluginOptions) error { + path := "/plugins/" + opts.Name + "/enable?" 
+ queryString(opts)
+	resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisablePluginOptions specify parameters to the DisablePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type DisablePluginOptions struct {
+	// The Name of the plugin.
+	Name string `qs:"-"`
+
+	Context context.Context
+}
+
+// DisablePlugin disables the plugin named in opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) DisablePlugin(opts DisablePluginOptions) error {
+	path := "/plugins/" + opts.Name + "/disable"
+	resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CreatePluginOptions specify parameters to the CreatePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type CreatePluginOptions struct {
+	// The Name of the plugin.
+	Name string `qs:"name"`
+	// Path to tar containing plugin
+	Path string `qs:"-"`
+
+	Context context.Context
+}
+
+// CreatePlugin creates the plugin described by opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) {
+	path := "/plugins/create?" + queryString(opts)
+	resp, err := c.do(http.MethodPost, path, doOptions{
+		data:    opts.Path,
+		context: opts.Context,
+	})
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	containerNameBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(containerNameBytes), nil
+}
+
+// PushPluginOptions specify parameters to the PushPlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PushPluginOptions struct {
+	// The Name of the plugin.
+	Name string
+
+	Context context.Context
+}
+
+// PushPlugin pushes the plugin named in opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) PushPlugin(opts PushPluginOptions) error {
+	path := "/plugins/" + opts.Name + "/push"
+	resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ConfigurePluginOptions specify parameters to the ConfigurePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type ConfigurePluginOptions struct {
+	// The Name of the plugin.
+	Name string `qs:"name"`
+	Envs []string
+
+	Context context.Context
+}
+
+// ConfigurePlugin configures the plugin named in opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error {
+	path := "/plugins/" + opts.Name + "/set"
+	resp, err := c.do(http.MethodPost, path, doOptions{
+		data:    opts.Envs,
+		context: opts.Context,
+	})
+	if err != nil {
+		var e *Error
+		if errors.As(err, &e) && e.Status == http.StatusNotFound {
+			return &NoSuchPlugin{ID: opts.Name}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// NoSuchPlugin is the error returned when a given plugin does not exist.
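+//
+// A minimal caller-side sketch (editorial addition) of detecting it with
+// errors.As; the plugin name is a placeholder:
+//
+//	_, err := client.InspectPlugins("no-such-plugin:latest", context.TODO())
+//	var nsp *docker.NoSuchPlugin
+//	if errors.As(err, &nsp) {
+//		fmt.Println("missing plugin:", nsp.ID)
+//	}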
+type NoSuchPlugin struct { + ID string + Err error +} + +func (err *NoSuchPlugin) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such plugin: " + err.ID +} diff --git a/vendor/github.com/fsouza/go-dockerclient/registry_auth.go b/vendor/github.com/fsouza/go-dockerclient/registry_auth.go new file mode 100644 index 00000000000..1f60d1e8f3b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/registry_auth.go @@ -0,0 +1,10 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +type registryAuth interface { + isEmpty() bool + headerKey() string +} diff --git a/vendor/github.com/fsouza/go-dockerclient/signal.go b/vendor/github.com/fsouza/go-dockerclient/signal.go new file mode 100644 index 00000000000..16aa00388fd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/signal.go @@ -0,0 +1,49 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +// Signal represents a signal that can be send to the container on +// KillContainer call. +type Signal int + +// These values represent all signals available on Linux, where containers will +// be running. +const ( + SIGABRT = Signal(0x6) + SIGALRM = Signal(0xe) + SIGBUS = Signal(0x7) + SIGCHLD = Signal(0x11) + SIGCLD = Signal(0x11) + SIGCONT = Signal(0x12) + SIGFPE = Signal(0x8) + SIGHUP = Signal(0x1) + SIGILL = Signal(0x4) + SIGINT = Signal(0x2) + SIGIO = Signal(0x1d) + SIGIOT = Signal(0x6) + SIGKILL = Signal(0x9) + SIGPIPE = Signal(0xd) + SIGPOLL = Signal(0x1d) + SIGPROF = Signal(0x1b) + SIGPWR = Signal(0x1e) + SIGQUIT = Signal(0x3) + SIGSEGV = Signal(0xb) + SIGSTKFLT = Signal(0x10) + SIGSTOP = Signal(0x13) + SIGSYS = Signal(0x1f) + SIGTERM = Signal(0xf) + SIGTRAP = Signal(0x5) + SIGTSTP = Signal(0x14) + SIGTTIN = Signal(0x15) + SIGTTOU = Signal(0x16) + SIGUNUSED = Signal(0x1f) + SIGURG = Signal(0x17) + SIGUSR1 = Signal(0xa) + SIGUSR2 = Signal(0xc) + SIGVTALRM = Signal(0x1a) + SIGWINCH = Signal(0x1c) + SIGXCPU = Signal(0x18) + SIGXFSZ = Signal(0x19) +) diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/fsouza/go-dockerclient/swarm.go new file mode 100644 index 00000000000..ae37cd1e8c3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/swarm.go @@ -0,0 +1,161 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +var ( + // ErrNodeAlreadyInSwarm is the error returned by InitSwarm and JoinSwarm + // when the node is already part of a Swarm. + ErrNodeAlreadyInSwarm = errors.New("node already in a Swarm") + + // ErrNodeNotInSwarm is the error returned by LeaveSwarm and UpdateSwarm + // when the node is not part of a Swarm. + ErrNodeNotInSwarm = errors.New("node is not in a Swarm") +) + +// InitSwarmOptions specify parameters to the InitSwarm function. +// See https://goo.gl/hzkgWu for more details. +type InitSwarmOptions struct { + swarm.InitRequest + Context context.Context +} + +// InitSwarm initializes a new Swarm and returns the node ID. 
+// See https://goo.gl/ZWyG1M for more details. +func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) { + path := "/swarm/init" + resp, err := c.do(http.MethodPost, path, doOptions{ + data: opts.InitRequest, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { + return "", ErrNodeAlreadyInSwarm + } + return "", err + } + defer resp.Body.Close() + var response string + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return "", err + } + return response, nil +} + +// JoinSwarmOptions specify parameters to the JoinSwarm function. +// See https://goo.gl/TdhJWU for more details. +type JoinSwarmOptions struct { + swarm.JoinRequest + Context context.Context +} + +// JoinSwarm joins an existing Swarm. +// See https://goo.gl/N59IP1 for more details. +func (c *Client) JoinSwarm(opts JoinSwarmOptions) error { + path := "/swarm/join" + resp, err := c.do(http.MethodPost, path, doOptions{ + data: opts.JoinRequest, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { + return ErrNodeAlreadyInSwarm + } + } + resp.Body.Close() + return err +} + +// LeaveSwarmOptions specify parameters to the LeaveSwarm function. +// See https://goo.gl/UWDlLg for more details. +type LeaveSwarmOptions struct { + Force bool + Context context.Context +} + +// LeaveSwarm leaves a Swarm. +// See https://goo.gl/FTX1aD for more details. +func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error { + params := make(url.Values) + params.Set("force", strconv.FormatBool(opts.Force)) + path := "/swarm/leave?" + params.Encode() + resp, err := c.do(http.MethodPost, path, doOptions{ + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { + return ErrNodeNotInSwarm + } + } + resp.Body.Close() + return err +} + +// UpdateSwarmOptions specify parameters to the UpdateSwarm function. +// See https://goo.gl/vFbq36 for more details. +type UpdateSwarmOptions struct { + Version int + RotateWorkerToken bool + RotateManagerToken bool + Swarm swarm.Spec + Context context.Context +} + +// UpdateSwarm updates a Swarm. +// See https://goo.gl/iJFnsw for more details. +func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error { + params := make(url.Values) + params.Set("version", strconv.Itoa(opts.Version)) + params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken)) + params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken)) + path := "/swarm/update?" + params.Encode() + resp, err := c.do(http.MethodPost, path, doOptions{ + data: opts.Swarm, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { + return ErrNodeNotInSwarm + } + } + resp.Body.Close() + return err +} + +// InspectSwarm inspects a Swarm. +// See https://goo.gl/MFwgX9 for more details. 
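+//
+// A short usage sketch (editorial addition), assuming a configured *Client:
+//
+//	info, err := client.InspectSwarm(context.TODO())
+//	if errors.Is(err, docker.ErrNodeNotInSwarm) {
+//		fmt.Println("node is not part of a swarm")
+//	} else if err == nil {
+//		fmt.Println("swarm ID:", info.ID)
+//	}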
+func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) { + response := swarm.Swarm{} + resp, err := c.do(http.MethodGet, "/swarm", doOptions{ + context: ctx, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) { + return response, ErrNodeNotInSwarm + } + return response, err + } + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go new file mode 100644 index 00000000000..055e99544aa --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go @@ -0,0 +1,175 @@ +// Copyright 2017 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// NoSuchConfig is the error returned when a given config does not exist. +type NoSuchConfig struct { + ID string + Err error +} + +func (err *NoSuchConfig) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such config: " + err.ID +} + +// CreateConfigOptions specify parameters to the CreateConfig function. +// +// See https://goo.gl/KrVjHz for more details. +type CreateConfigOptions struct { + Auth AuthConfiguration `qs:"-"` + swarm.ConfigSpec + Context context.Context +} + +// CreateConfig creates a new config, returning the config instance +// or an error in case of failure. +// +// See https://goo.gl/KrVjHz for more details. +func (c *Client) CreateConfig(opts CreateConfigOptions) (*swarm.Config, error) { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return nil, err + } + path := "/configs/create?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{ + headers: headers, + data: opts.ConfigSpec, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var config swarm.Config + if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { + return nil, err + } + return &config, nil +} + +// RemoveConfigOptions encapsulates options to remove a config. +// +// See https://goo.gl/Tqrtya for more details. +type RemoveConfigOptions struct { + ID string `qs:"-"` + Context context.Context +} + +// RemoveConfig removes a config, returning an error in case of failure. +// +// See https://goo.gl/Tqrtya for more details. +func (c *Client) RemoveConfig(opts RemoveConfigOptions) error { + path := "/configs/" + opts.ID + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchConfig{ID: opts.ID} + } + return err + } + resp.Body.Close() + return nil +} + +// UpdateConfigOptions specify parameters to the UpdateConfig function. +// +// See https://goo.gl/wu3MmS for more details. +type UpdateConfigOptions struct { + Auth AuthConfiguration `qs:"-"` + swarm.ConfigSpec + Context context.Context + Version uint64 +} + +// UpdateConfig updates the config at ID with the options +// +// Only label can be updated +// https://docs.docker.com/engine/api/v1.33/#operation/ConfigUpdate +// See https://goo.gl/wu3MmS for more details. 
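+//
+// A hedged sketch (editorial addition): the caller passes the last known
+// version, typically read via InspectConfig first; the config name is a
+// placeholder:
+//
+//	cfg, err := client.InspectConfig("app-config")
+//	if err == nil {
+//		cfg.Spec.Labels = map[string]string{"tier": "web"}
+//		err = client.UpdateConfig(cfg.ID, docker.UpdateConfigOptions{
+//			ConfigSpec: cfg.Spec,
+//			Version:    cfg.Version.Index,
+//		})
+//	}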
+func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return err + } + params := make(url.Values) + params.Set("version", strconv.FormatUint(opts.Version, 10)) + resp, err := c.do(http.MethodPost, "/configs/"+id+"/update?"+params.Encode(), doOptions{ + headers: headers, + data: opts.ConfigSpec, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchConfig{ID: id} + } + return err + } + defer resp.Body.Close() + return nil +} + +// InspectConfig returns information about a config by its ID. +// +// See https://goo.gl/dHmr75 for more details. +func (c *Client) InspectConfig(id string) (*swarm.Config, error) { + path := "/configs/" + id + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchConfig{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var config swarm.Config + if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { + return nil, err + } + return &config, nil +} + +// ListConfigsOptions specify parameters to the ListConfigs function. +// +// See https://goo.gl/DwvNMd for more details. +type ListConfigsOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListConfigs returns a slice of configs matching the given criteria. +// +// See https://goo.gl/DwvNMd for more details. +func (c *Client) ListConfigs(opts ListConfigsOptions) ([]swarm.Config, error) { + path := "/configs?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var configs []swarm.Config + if err := json.NewDecoder(resp.Body).Decode(&configs); err != nil { + return nil, err + } + return configs, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go new file mode 100644 index 00000000000..8538a167b96 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go @@ -0,0 +1,134 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// NoSuchNode is the error returned when a given node does not exist. +type NoSuchNode struct { + ID string + Err error +} + +func (err *NoSuchNode) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such node: " + err.ID +} + +// ListNodesOptions specify parameters to the ListNodes function. +// +// See http://goo.gl/3K4GwU for more details. +type ListNodesOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListNodes returns a slice of nodes matching the given criteria. +// +// See http://goo.gl/3K4GwU for more details. +func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) { + path := "/nodes?" 
+ queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var nodes []swarm.Node + if err := json.NewDecoder(resp.Body).Decode(&nodes); err != nil { + return nil, err + } + return nodes, nil +} + +// InspectNode returns information about a node by its ID. +// +// See http://goo.gl/WjkTOk for more details. +func (c *Client) InspectNode(id string) (*swarm.Node, error) { + resp, err := c.do(http.MethodGet, "/nodes/"+id, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchNode{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var node swarm.Node + if err := json.NewDecoder(resp.Body).Decode(&node); err != nil { + return nil, err + } + return &node, nil +} + +// UpdateNodeOptions specify parameters to the NodeUpdate function. +// +// See http://goo.gl/VPBFgA for more details. +type UpdateNodeOptions struct { + swarm.NodeSpec + Version uint64 + Context context.Context +} + +// UpdateNode updates a node. +// +// See http://goo.gl/VPBFgA for more details. +func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error { + params := make(url.Values) + params.Set("version", strconv.FormatUint(opts.Version, 10)) + path := "/nodes/" + id + "/update?" + params.Encode() + resp, err := c.do(http.MethodPost, path, doOptions{ + context: opts.Context, + forceJSON: true, + data: opts.NodeSpec, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchNode{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// RemoveNodeOptions specify parameters to the RemoveNode function. +// +// See http://goo.gl/0SNvYg for more details. +type RemoveNodeOptions struct { + ID string + Force bool + Context context.Context +} + +// RemoveNode removes a node. +// +// See http://goo.gl/0SNvYg for more details. +func (c *Client) RemoveNode(opts RemoveNodeOptions) error { + params := make(url.Values) + params.Set("force", strconv.FormatBool(opts.Force)) + path := "/nodes/" + opts.ID + "?" + params.Encode() + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchNode{ID: opts.ID} + } + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go new file mode 100644 index 00000000000..375e6e5ba37 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go @@ -0,0 +1,175 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// NoSuchSecret is the error returned when a given secret does not exist. +type NoSuchSecret struct { + ID string + Err error +} + +func (err *NoSuchSecret) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such secret: " + err.ID +} + +// CreateSecretOptions specify parameters to the CreateSecret function. +// +// See https://goo.gl/KrVjHz for more details. 
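+//
+// A minimal sketch (editorial addition); the secret name and payload are
+// placeholders:
+//
+//	secret, err := client.CreateSecret(docker.CreateSecretOptions{
+//		SecretSpec: swarm.SecretSpec{
+//			Annotations: swarm.Annotations{Name: "db-password"},
+//			Data:        []byte("s3cret"),
+//		},
+//	})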
+type CreateSecretOptions struct { + Auth AuthConfiguration `qs:"-"` + swarm.SecretSpec + Context context.Context +} + +// CreateSecret creates a new secret, returning the secret instance +// or an error in case of failure. +// +// See https://goo.gl/KrVjHz for more details. +func (c *Client) CreateSecret(opts CreateSecretOptions) (*swarm.Secret, error) { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return nil, err + } + path := "/secrets/create?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{ + headers: headers, + data: opts.SecretSpec, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var secret swarm.Secret + if err := json.NewDecoder(resp.Body).Decode(&secret); err != nil { + return nil, err + } + return &secret, nil +} + +// RemoveSecretOptions encapsulates options to remove a secret. +// +// See https://goo.gl/Tqrtya for more details. +type RemoveSecretOptions struct { + ID string `qs:"-"` + Context context.Context +} + +// RemoveSecret removes a secret, returning an error in case of failure. +// +// See https://goo.gl/Tqrtya for more details. +func (c *Client) RemoveSecret(opts RemoveSecretOptions) error { + path := "/secrets/" + opts.ID + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchSecret{ID: opts.ID} + } + return err + } + resp.Body.Close() + return nil +} + +// UpdateSecretOptions specify parameters to the UpdateSecret function. +// +// Only label can be updated +// See https://docs.docker.com/engine/api/v1.33/#operation/SecretUpdate +// See https://goo.gl/wu3MmS for more details. +type UpdateSecretOptions struct { + Auth AuthConfiguration `qs:"-"` + swarm.SecretSpec + Context context.Context + Version uint64 +} + +// UpdateSecret updates the secret at ID with the options +// +// See https://goo.gl/wu3MmS for more details. +func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return err + } + params := make(url.Values) + params.Set("version", strconv.FormatUint(opts.Version, 10)) + resp, err := c.do(http.MethodPost, "/secrets/"+id+"/update?"+params.Encode(), doOptions{ + headers: headers, + data: opts.SecretSpec, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchSecret{ID: id} + } + return err + } + defer resp.Body.Close() + return nil +} + +// InspectSecret returns information about a secret by its ID. +// +// See https://goo.gl/dHmr75 for more details. +func (c *Client) InspectSecret(id string) (*swarm.Secret, error) { + path := "/secrets/" + id + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchSecret{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var secret swarm.Secret + if err := json.NewDecoder(resp.Body).Decode(&secret); err != nil { + return nil, err + } + return &secret, nil +} + +// ListSecretsOptions specify parameters to the ListSecrets function. +// +// See https://goo.gl/DwvNMd for more details. +type ListSecretsOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListSecrets returns a slice of secrets matching the given criteria. 
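+//
+// A filtering sketch (editorial addition); the "name" key mirrors the
+// Docker API's secret filters:
+//
+//	secrets, err := client.ListSecrets(docker.ListSecretsOptions{
+//		Filters: map[string][]string{"name": {"db-password"}},
+//	})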
+// +// See https://goo.gl/DwvNMd for more details. +func (c *Client) ListSecrets(opts ListSecretsOptions) ([]swarm.Secret, error) { + path := "/secrets?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var secrets []swarm.Secret + if err := json.NewDecoder(resp.Body).Decode(&secrets); err != nil { + return nil, err + } + return secrets, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go new file mode 100644 index 00000000000..0d0f007b722 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go @@ -0,0 +1,218 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "time" + + "github.com/docker/docker/api/types/swarm" +) + +// NoSuchService is the error returned when a given service does not exist. +type NoSuchService struct { + ID string + Err error +} + +func (err *NoSuchService) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such service: " + err.ID +} + +// CreateServiceOptions specify parameters to the CreateService function. +// +// See https://goo.gl/KrVjHz for more details. +type CreateServiceOptions struct { + Auth AuthConfiguration `qs:"-"` + swarm.ServiceSpec + Context context.Context +} + +// CreateService creates a new service, returning the service instance +// or an error in case of failure. +// +// See https://goo.gl/KrVjHz for more details. +func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error) { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return nil, err + } + path := "/services/create?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{ + headers: headers, + data: opts.ServiceSpec, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var service swarm.Service + if err := json.NewDecoder(resp.Body).Decode(&service); err != nil { + return nil, err + } + return &service, nil +} + +// RemoveServiceOptions encapsulates options to remove a service. +// +// See https://goo.gl/Tqrtya for more details. +type RemoveServiceOptions struct { + ID string `qs:"-"` + Context context.Context +} + +// RemoveService removes a service, returning an error in case of failure. +// +// See https://goo.gl/Tqrtya for more details. +func (c *Client) RemoveService(opts RemoveServiceOptions) error { + path := "/services/" + opts.ID + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchService{ID: opts.ID} + } + return err + } + resp.Body.Close() + return nil +} + +// UpdateServiceOptions specify parameters to the UpdateService function. +// +// See https://goo.gl/wu3MmS for more details. +type UpdateServiceOptions struct { + Auth AuthConfiguration `qs:"-"` + swarm.ServiceSpec `qs:"-"` + Context context.Context + Version uint64 + Rollback string +} + +// UpdateService updates the service at ID with the options +// +// See https://goo.gl/wu3MmS for more details. 
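+//
+// A hedged sketch (editorial addition): as with configs, the last known
+// version accompanies the new spec; the service name is a placeholder:
+//
+//	svc, err := client.InspectService("web")
+//	if err == nil {
+//		svc.Spec.TaskTemplate.ForceUpdate++
+//		err = client.UpdateService(svc.ID, docker.UpdateServiceOptions{
+//			ServiceSpec: svc.Spec,
+//			Version:     svc.Version.Index,
+//		})
+//	}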
+func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return err + } + resp, err := c.do(http.MethodPost, "/services/"+id+"/update?"+queryString(opts), doOptions{ + headers: headers, + data: opts.ServiceSpec, + forceJSON: true, + context: opts.Context, + }) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return &NoSuchService{ID: id} + } + return err + } + defer resp.Body.Close() + return nil +} + +// InspectService returns information about a service by its ID. +// +// See https://goo.gl/dHmr75 for more details. +func (c *Client) InspectService(id string) (*swarm.Service, error) { + path := "/services/" + id + resp, err := c.do(http.MethodGet, path, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchService{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var service swarm.Service + if err := json.NewDecoder(resp.Body).Decode(&service); err != nil { + return nil, err + } + return &service, nil +} + +// ListServicesOptions specify parameters to the ListServices function. +// +// See https://goo.gl/DwvNMd for more details. +type ListServicesOptions struct { + Filters map[string][]string + Status bool + Context context.Context +} + +// ListServices returns a slice of services matching the given criteria. +// +// See https://goo.gl/DwvNMd for more details. +func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) { + path := "/services?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var services []swarm.Service + if err := json.NewDecoder(resp.Body).Decode(&services); err != nil { + return nil, err + } + return services, nil +} + +// LogsServiceOptions represents the set of options used when getting logs from a +// service. +type LogsServiceOptions struct { + Context context.Context + Service string `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + Tail string + Since int64 + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` + Follow bool + Stdout bool + Stderr bool + Timestamps bool + Details bool +} + +// GetServiceLogs gets stdout and stderr logs from the specified service. +// +// When LogsServiceOptions.RawTerminal is set to false, go-dockerclient will multiplex +// the streams and send the containers stdout to LogsServiceOptions.OutputStream, and +// stderr to LogsServiceOptions.ErrorStream. +// +// When LogsServiceOptions.RawTerminal is true, callers will get the raw stream on +// LogsServiceOptions.OutputStream. +func (c *Client) GetServiceLogs(opts LogsServiceOptions) error { + if opts.Service == "" { + return &NoSuchService{ID: opts.Service} + } + if opts.Tail == "" { + opts.Tail = "all" + } + path := "/services/" + opts.Service + "/logs?" 
+ queryString(opts) + return c.stream(http.MethodGet, path, streamOptions{ + setRawTerminal: opts.RawTerminal, + stdout: opts.OutputStream, + stderr: opts.ErrorStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go new file mode 100644 index 00000000000..9321368d3f3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go @@ -0,0 +1,72 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + "github.com/docker/docker/api/types/swarm" +) + +// NoSuchTask is the error returned when a given task does not exist. +type NoSuchTask struct { + ID string + Err error +} + +func (err *NoSuchTask) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such task: " + err.ID +} + +// ListTasksOptions specify parameters to the ListTasks function. +// +// See http://goo.gl/rByLzw for more details. +type ListTasksOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListTasks returns a slice of tasks matching the given criteria. +// +// See http://goo.gl/rByLzw for more details. +func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) { + path := "/tasks?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var tasks []swarm.Task + if err := json.NewDecoder(resp.Body).Decode(&tasks); err != nil { + return nil, err + } + return tasks, nil +} + +// InspectTask returns information about a task by its ID. +// +// See http://goo.gl/kyziuq for more details. +func (c *Client) InspectTask(id string) (*swarm.Task, error) { + resp, err := c.do(http.MethodGet, "/tasks/"+id, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, &NoSuchTask{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var task swarm.Task + if err := json.NewDecoder(resp.Body).Decode(&task); err != nil { + return nil, err + } + return &task, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go new file mode 100644 index 00000000000..46b9faf00e2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/system.go @@ -0,0 +1,73 @@ +package docker + +import ( + "context" + "encoding/json" + "net/http" +) + +// VolumeUsageData represents usage data from the docker system api +// More Info Here https://dockr.ly/2PNzQyO +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field
+	// is set to `-1` ("not available")
+	//
+	// Required: true
+	Size int64 `json:"Size"`
+}
+
+// ImageSummary represents data about what images are
+// currently known to docker
+// More Info Here https://dockr.ly/2PNzQyO
+type ImageSummary struct {
+	Containers  int64             `json:"Containers"`
+	Created     int64             `json:"Created"`
+	ID          string            `json:"Id"`
+	Labels      map[string]string `json:"Labels"`
+	ParentID    string            `json:"ParentId"`
+	RepoDigests []string          `json:"RepoDigests"`
+	RepoTags    []string          `json:"RepoTags"`
+	SharedSize  int64             `json:"SharedSize"`
+	Size        int64             `json:"Size"`
+	VirtualSize int64             `json:"VirtualSize"`
+}
+
+// DiskUsage holds information about what docker is using disk space on.
+// More Info Here https://dockr.ly/2PNzQyO
+type DiskUsage struct {
+	LayersSize int64
+	Images     []*ImageSummary
+	Containers []*APIContainers
+	Volumes    []*Volume
+}
+
+// DiskUsageOptions only contains a context for canceling.
+type DiskUsageOptions struct {
+	Context context.Context
+}
+
+// DiskUsage returns a *DiskUsage describing what docker is using disk on.
+//
+// More Info Here https://dockr.ly/2PNzQyO
+func (c *Client) DiskUsage(opts DiskUsageOptions) (*DiskUsage, error) {
+	path := "/system/df"
+	resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var du *DiskUsage
+	if err := json.NewDecoder(resp.Body).Decode(&du); err != nil {
+		return nil, err
+	}
+	return du, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go
new file mode 100644
index 00000000000..f27a7bbf21f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/tar.go
@@ -0,0 +1,122 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/fileutils"
+)
+
+func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
+	srcPath, err := filepath.Abs(srcPath)
+	if err != nil {
+		return nil, err
+	}
+
+	excludes, err := parseDockerignore(srcPath)
+	if err != nil {
+		return nil, err
+	}
+
+	includes := []string{"."}
+
+	// If .dockerignore mentions .dockerignore or the Dockerfile
+	// then make sure we send both files over to the daemon
+	// because Dockerfile is, obviously, needed no matter what, and
+	// .dockerignore is needed to know if either one needs to be
+	// removed. The daemon will remove them for us, if needed, after it
+	// parses the Dockerfile.
+	//
+	// https://github.com/docker/docker/issues/8330
+	//
+	forceIncludeFiles := []string{".dockerignore", dockerfilePath}
+
+	for _, includeFile := range forceIncludeFiles {
+		if includeFile == "" {
+			continue
+		}
+		keepThem, err := fileutils.Matches(includeFile, excludes)
+		if err != nil {
+			return nil, fmt.Errorf("cannot match .dockerignore: '%s', error: %w", includeFile, err)
+		}
+		if keepThem {
+			includes = append(includes, includeFile)
+		}
+	}
+
+	if err := validateContextDirectory(srcPath, excludes); err != nil {
+		return nil, err
+	}
+	tarOpts := &archive.TarOptions{
+		ExcludePatterns: excludes,
+		IncludeFiles:    includes,
+		Compression:     archive.Uncompressed,
+		NoLchown:        true,
+	}
+	return archive.TarWithOptions(srcPath, tarOpts)
+}
+
+// validateContextDirectory checks if all the contents of the directory
+// can be read and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error.
+func validateContextDirectory(srcPath string, excludes []string) error {
+	return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+		// skip this directory/file if it's not in the path, it won't get added to the context
+		if relFilePath, relErr := filepath.Rel(srcPath, filePath); relErr != nil {
+			return relErr
+		} else if skip, matchErr := fileutils.Matches(relFilePath, excludes); matchErr != nil {
+			return matchErr
+		} else if skip {
+			if f.IsDir() {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+
+		if err != nil {
+			if os.IsPermission(err) {
+				return fmt.Errorf("cannot stat %q: %w", filePath, err)
+			}
+			if os.IsNotExist(err) {
+				return nil
+			}
+			return err
+		}
+
+		// skip checking if symlinks point to non-existing files; such symlinks can be useful.
+		// also skip named pipes, because they hang on open
+		if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+			return nil
+		}
+
+		if !f.IsDir() {
+			currentFile, err := os.Open(filePath)
+			if err != nil {
+				return fmt.Errorf("cannot open %q for reading: %w", filePath, err)
+			}
+			currentFile.Close()
+		}
+		return nil
+	})
+}
+
+func parseDockerignore(root string) ([]string, error) {
+	var excludes []string
+	ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+	if err != nil && !os.IsNotExist(err) {
+		return excludes, fmt.Errorf("error reading .dockerignore: %w", err)
+	}
+	excludes = strings.Split(string(ignore), "\n")
+
+	return excludes, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
new file mode 100644
index 00000000000..56f00589bea
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -0,0 +1,116 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The content is borrowed from Docker's own source code to provide a simple
+// TLS-based dialer.
+
+package docker
+
+import (
+	"crypto/tls"
+	"errors"
+	"net"
+	"strings"
+	"time"
+)
+
+type tlsClientCon struct {
+	*tls.Conn
+	rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+	// Go's standard tls.Conn doesn't provide the CloseWrite() method, so we do it
+	// on its underlying connection.
+	if cwc, ok := c.rawConn.(interface {
+		CloseWrite() error
+	}); ok {
+		return cwc.CloseWrite()
+	}
+	return nil
+}
+
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := time.Until(dialer.Deadline)
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	rawConn, err := dialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		config = copyTLSConfig(config)
+		config.ServerName = hostname
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where Docker differs from the standard crypto/tls package: we return a
+	// wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+// copyTLSConfig exists to silence an error message from go vet.
+func copyTLSConfig(cfg *tls.Config) *tls.Config {
+	return &tls.Config{
+		Certificates:             cfg.Certificates,
+		CipherSuites:             cfg.CipherSuites,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		CurvePreferences:         cfg.CurvePreferences,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		MaxVersion:               cfg.MaxVersion,
+		MinVersion:               cfg.MinVersion,
+		NextProtos:               cfg.NextProtos,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		Rand:                     cfg.Rand,
+		RootCAs:                  cfg.RootCAs,
+		ServerName:               cfg.ServerName,
+		SessionTicketsDisabled:   cfg.SessionTicketsDisabled,
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go
new file mode 100644
index 00000000000..9f8a435c91f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/volume.go
@@ -0,0 +1,194 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"time"
+)
+
+var (
+	// ErrNoSuchVolume is the error returned when the volume does not exist.
+	ErrNoSuchVolume = errors.New("no such volume")
+
+	// ErrVolumeInUse is the error returned when the volume requested to be removed is still in use.
+	ErrVolumeInUse = errors.New("volume in use and cannot be removed")
+)
+
+// Volume represents a volume.
+//
+// See https://goo.gl/3wgTsd for more details.
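+//
+// A minimal sketch (editorial addition); name and driver are placeholders:
+//
+//	vol, err := client.CreateVolume(docker.CreateVolumeOptions{
+//		Name:   "app-data",
+//		Driver: "local",
+//	})
+//	if err == nil {
+//		fmt.Println("mounted at", vol.Mountpoint)
+//	}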
+type Volume struct { + Name string `json:"Name" yaml:"Name" toml:"Name"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` + Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty" toml:"Mountpoint,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` + Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"` + CreatedAt time.Time `json:"CreatedAt,omitempty" yaml:"CreatedAt,omitempty" toml:"CreatedAt,omitempty"` +} + +// ListVolumesOptions specify parameters to the ListVolumes function. +// +// See https://goo.gl/3wgTsd for more details. +type ListVolumesOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListVolumes returns a list of available volumes in the server. +// +// See https://goo.gl/3wgTsd for more details. +func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { + resp, err := c.do(http.MethodGet, "/volumes?"+queryString(opts), doOptions{ + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + m := make(map[string]interface{}) + if err = json.NewDecoder(resp.Body).Decode(&m); err != nil { + return nil, err + } + var volumes []Volume + volumesJSON, ok := m["Volumes"] + if !ok { + return volumes, nil + } + data, err := json.Marshal(volumesJSON) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, &volumes); err != nil { + return nil, err + } + return volumes, nil +} + +// CreateVolumeOptions specify parameters to the CreateVolume function. +// +// See https://goo.gl/qEhmEC for more details. +type CreateVolumeOptions struct { + Name string + Driver string + DriverOpts map[string]string + Context context.Context `json:"-"` + Labels map[string]string +} + +// CreateVolume creates a volume on the server. +// +// See https://goo.gl/qEhmEC for more details. +func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { + resp, err := c.do(http.MethodPost, "/volumes/create", doOptions{ + data: opts, + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var volume Volume + if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { + return nil, err + } + return &volume, nil +} + +// InspectVolume returns a volume by its name. +// +// See https://goo.gl/GMjsMc for more details. +func (c *Client) InspectVolume(name string) (*Volume, error) { + resp, err := c.do(http.MethodGet, "/volumes/"+name, doOptions{}) + if err != nil { + var e *Error + if errors.As(err, &e) && e.Status == http.StatusNotFound { + return nil, ErrNoSuchVolume + } + return nil, err + } + defer resp.Body.Close() + var volume Volume + if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { + return nil, err + } + return &volume, nil +} + +// RemoveVolume removes a volume by its name. +// +// Deprecated: Use RemoveVolumeWithOptions instead. +func (c *Client) RemoveVolume(name string) error { + return c.RemoveVolumeWithOptions(RemoveVolumeOptions{Name: name}) +} + +// RemoveVolumeOptions specify parameters to the RemoveVolumeWithOptions +// function. +// +// See https://goo.gl/nvd6qj for more details. +type RemoveVolumeOptions struct { + Context context.Context + Name string `qs:"-"` + Force bool +} + +// RemoveVolumeWithOptions removes a volume by its name and takes extra +// parameters. +// +// See https://goo.gl/nvd6qj for more details. 
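+//
+// A short sketch (editorial addition) of a forced removal and the package's
+// sentinel errors:
+//
+//	err := client.RemoveVolumeWithOptions(docker.RemoveVolumeOptions{
+//		Name:  "app-data",
+//		Force: true,
+//	})
+//	switch {
+//	case errors.Is(err, docker.ErrNoSuchVolume):
+//		// already gone
+//	case errors.Is(err, docker.ErrVolumeInUse):
+//		// still referenced by a container
+//	}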
+func (c *Client) RemoveVolumeWithOptions(opts RemoveVolumeOptions) error { + path := "/volumes/" + opts.Name + resp, err := c.do(http.MethodDelete, path+"?"+queryString(opts), doOptions{context: opts.Context}) + if err != nil { + var e *Error + if errors.As(err, &e) { + if e.Status == http.StatusNotFound { + return ErrNoSuchVolume + } + if e.Status == http.StatusConflict { + return ErrVolumeInUse + } + } + return err + } + defer resp.Body.Close() + return nil +} + +// PruneVolumesOptions specify parameters to the PruneVolumes function. +// +// See https://goo.gl/f9XDem for more details. +type PruneVolumesOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneVolumesResults specify results from the PruneVolumes function. +// +// See https://goo.gl/f9XDem for more details. +type PruneVolumesResults struct { + VolumesDeleted []string + SpaceReclaimed int64 +} + +// PruneVolumes deletes volumes which are unused. +// +// See https://goo.gl/f9XDem for more details. +func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) { + path := "/volumes/prune?" + queryString(opts) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var results PruneVolumesResults + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return nil, err + } + return &results, nil +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 00000000000..e256a31e00a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000000..0e9d6edc010 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000000..7805d36de73 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000000..0200f75b4d1 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. 
As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 00000000000..58600740266 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
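+				// Every field is recorded here except an untagged anonymous
+				// struct, which is instead queued below and flattened on the
+				// next BFS pass.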
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
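+	// Two goroutines can miss the cache and compute the same fields; that is
+	// harmless, as either result is identical and the write below is guarded
+	// by the lock.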
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. 
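+// Letters are compared case-insensitively via the ASCII case mask; any
+// non-letter byte must match exactly.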
+func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 00000000000..4fb4054a8b7 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. 
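+// (The output may be re-encoded, e.g. key order and number formatting can
+// change, but the data itself should survive intact.)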
+// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. 
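+				// Note the 32-bit precision below: it deliberately matches the
+				// go-yaml conversion referenced above rather than Go's default
+				// float64 formatting.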
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. 
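+		// Whatever scalar type arrives, render it as a string so it can
+		// populate the string-typed JSON target; anything else falls through
+		// with s empty and the value is left untouched.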
+ var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md new file mode 100644 index 00000000000..c88f9b2bdd0 --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +